From ecf7a35b1940971bd4268bba34cd14dc9f0b6bc6 Mon Sep 17 00:00:00 2001 From: Jimmi Dyson Date: Wed, 3 Aug 2016 14:49:44 +0100 Subject: [PATCH] Upgrade to Kubernetes v1.3.4 --- Godeps/Godeps.json | 1874 +++++++++-------- docs/minikube_start.md | 2 +- .../engine-api/client/container_logs.go | 4 + .../docker/engine-api/client/image_load.go | 4 +- .../docker/engine-api/client/image_pull.go | 4 +- .../docker/engine-api/client/image_push.go | 6 +- .../docker/engine-api/client/image_search.go | 9 + .../docker/engine-api/types/client.go | 4 + .../engine-api/types/container/host_config.go | 29 +- .../types/reference/image_reference.go | 2 + .../engine-api/types/registry/registry.go | 4 +- .../docker/engine-api/types/types.go | 1 + .../app/controllermanager.go | 6 +- .../apis/rbac/validation/rulevalidation.go | 4 +- .../pkg/cloudprovider/providers/aws/aws.go | 555 ++--- .../providers/aws/aws_instancegroups.go | 10 +- .../providers/aws/aws_loadbalancer.go | 48 +- .../cloudprovider/providers/aws/aws_routes.go | 34 +- .../pkg/cloudprovider/providers/gce/gce.go | 18 + .../pkg/controller/node/cidr_allocator.go | 50 +- .../pkg/controller/node/cidr_set.go | 44 +- .../pkg/controller/node/nodecontroller.go | 28 + .../pkg/controller/volume/attachdetach/OWNERS | 2 + .../attach_detach_controller.go | 12 +- .../cache/actual_state_of_world.go | 2 +- .../cache/desired_state_of_world.go | 0 .../desired_state_of_world_populator.go | 28 +- .../reconciler/reconciler.go | 25 +- .../statusupdater/fake_node_status_updater.go | 0 .../statusupdater/node_status_updater.go | 8 +- .../{ => volume}/persistentvolume/OWNERS | 1 + .../persistentvolume/controller.go | 0 .../persistentvolume/controller_base.go | 0 .../{ => volume}/persistentvolume/index.go | 0 .../persistentvolume/volume_host.go | 0 .../k8s.io/kubernetes/pkg/kubelet/kubelet.go | 12 +- .../k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go | 7 - .../pkg/kubelet/volumemanager/OWNERS | 2 + .../cache/actual_state_of_world.go | 44 +- 
.../cache/desired_state_of_world.go | 31 +- .../desired_state_of_world_populator.go | 2 +- .../reconciler/reconciler.go | 41 +- .../volume_manager.go | 6 +- vendor/k8s.io/kubernetes/pkg/master/master.go | 1 + .../kubernetes/pkg/util/goroutinemap/OWNERS | 2 + .../exponentialbackoff/exponential_backoff.go | 120 ++ .../pkg/util/goroutinemap/goroutinemap.go | 172 +- .../kubernetes/pkg/util/mount/mount_linux.go | 10 +- .../kubernetes/pkg/util/runtime/runtime.go | 26 +- vendor/k8s.io/kubernetes/pkg/version/base.go | 2 +- .../kubernetes/pkg/volume/aws_ebs/aws_ebs.go | 15 +- .../kubernetes/pkg/volume/aws_ebs/aws_util.go | 4 +- .../kubernetes/pkg/volume/cinder/cinder.go | 4 +- .../pkg/volume/configmap/configmap.go | 23 +- .../pkg/volume/downwardapi/downwardapi.go | 10 +- .../pkg/volume/flexvolume/flexvolume.go | 16 +- .../kubernetes/pkg/volume/gce_pd/gce_pd.go | 6 + .../pkg/volume/git_repo/git_repo.go | 12 +- .../k8s.io/kubernetes/pkg/volume/rbd/rbd.go | 3 +- .../kubernetes/pkg/volume/secret/secret.go | 10 +- .../util/nestedpendingoperations/OWNERS | 2 + .../nestedpendingoperations.go | 287 +++ .../pkg/volume/util/operationexecutor/OWNERS | 2 + .../operationexecutor/operation_executor.go | 57 +- .../volume/util/volumehelper/volumehelper.go | 6 + .../persistentvolume/label/admission.go | 2 +- 66 files changed, 2204 insertions(+), 1551 deletions(-) create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/OWNERS rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/attach_detach_controller.go (98%) rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/cache/actual_state_of_world.go (99%) rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/cache/desired_state_of_world.go (100%) rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/populator/desired_state_of_world_populator.go (84%) rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/reconciler/reconciler.go 
(88%) rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/statusupdater/fake_node_status_updater.go (100%) rename vendor/k8s.io/kubernetes/pkg/controller/volume/{ => attachdetach}/statusupdater/node_status_updater.go (92%) rename vendor/k8s.io/kubernetes/pkg/controller/{ => volume}/persistentvolume/OWNERS (73%) rename vendor/k8s.io/kubernetes/pkg/controller/{ => volume}/persistentvolume/controller.go (100%) rename vendor/k8s.io/kubernetes/pkg/controller/{ => volume}/persistentvolume/controller_base.go (100%) rename vendor/k8s.io/kubernetes/pkg/controller/{ => volume}/persistentvolume/index.go (100%) rename vendor/k8s.io/kubernetes/pkg/controller/{ => volume}/persistentvolume/volume_host.go (100%) create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS rename vendor/k8s.io/kubernetes/pkg/kubelet/{volume => volumemanager}/cache/actual_state_of_world.go (95%) rename vendor/k8s.io/kubernetes/pkg/kubelet/{volume => volumemanager}/cache/desired_state_of_world.go (92%) rename vendor/k8s.io/kubernetes/pkg/kubelet/{volume => volumemanager}/populator/desired_state_of_world_populator.go (99%) rename vendor/k8s.io/kubernetes/pkg/kubelet/{volume => volumemanager}/reconciler/reconciler.go (88%) rename vendor/k8s.io/kubernetes/pkg/kubelet/{volume => volumemanager}/volume_manager.go (98%) create mode 100644 vendor/k8s.io/kubernetes/pkg/util/goroutinemap/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/OWNERS diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 552985846b..0abdd7c5c7 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -612,68 +612,68 @@ }, { "ImportPath": 
"github.com/docker/engine-api/client", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/client/transport", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/client/transport/cancellable", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/blkiodev", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/container", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/filters", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/network", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/reference", - "Comment": 
"v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/registry", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/strslice", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/time", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/engine-api/types/versions", - "Comment": "v0.3.1-62-g3d72d39", - "Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2" + "Comment": "v0.3.1-78-gdea108d", + "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" }, { "ImportPath": "github.com/docker/go-connections/nat", @@ -1964,2238 +1964,2248 @@ }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app/options", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app/options", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app/options", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/admission", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/annotations", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/errors", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/api/errors/storage", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/meta", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/meta/metatypes", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/rest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned/validation", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apimachinery", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/latest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v0", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v1beta1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1alpha1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/validation", - "Comment": "v1.3.3", - 
"Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" 
+ "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1alpha1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apiserver", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apiserver/authenticator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/apiserver/metrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator/bearertoken", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authorizer", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authorizer/abac", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authorizer/union", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/handlers", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/user", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/cache", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/chaosclient", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelection", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/metrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/record", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/restclient", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/testing/core", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/transport", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery/fake", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/typed/dynamic", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/auth", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/endpoint", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/framework", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/framework/informers", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/controller/garbagecollector", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/gc", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/job", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/controller/persistentvolume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/petset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replicaset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + 
"Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replication", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/resourcequota", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/route", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/service", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/serviceaccount", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/cache", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/populator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/reconciler", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/statusupdater", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/conversion", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/aws", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/gcp", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/fields", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/genericapiserver", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/genericapiserver/options", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/healthz", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/httplog", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/client", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, 
{ "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/config", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/custommetrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockertools", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/envvars", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/leaky", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/lifecycle", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/cni", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/exec", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hairpin", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hostport", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/kubenet", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pleg", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pod", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober/results", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/rkt", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/portforward", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/stats", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/status", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/cache", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/format", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/ioutils", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/queue", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volume/cache", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volume/populator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volume/reconciler", - "Comment": "v1.3.3", - 
"Rev": "c6411395e09da356c608896d3d9725acab821418" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/labels", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/master", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/exec", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/http", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/tcp", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/config", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/iptables", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/userspace", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/evaluator/core", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/generic", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/install", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/cachesize", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/clusterrole", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/clusterrole/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/clusterrole/policybased", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/clusterrolebinding", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/clusterrolebinding/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/clusterrolebinding/policybased", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/componentstatus", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/configmap", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/configmap/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/controller", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/controller/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/daemonset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/daemonset/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/deployment", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/deployment/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/endpoint", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/endpoint/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/event", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/event/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/experimental/controller/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/generic", - 
"Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/generic/registry", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/generic/rest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/ingress", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/ingress/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/job", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/job/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/limitrange", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/limitrange/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/namespace", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/namespace/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networkpolicy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networkpolicy/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/node", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/node/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/node/rest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/persistentvolume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": 
"v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/persistentvolume/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/persistentvolumeclaim", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/persistentvolumeclaim/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/petset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/petset/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/pod", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/pod/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/pod/rest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/poddisruptionbudget", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/poddisruptionbudget/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/podsecuritypolicy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/podsecuritypolicy/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/podtemplate", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/podtemplate/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/replicaset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/replicaset/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/resourcequota", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/resourcequota/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/role", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/role/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/role/policybased", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rolebinding", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rolebinding/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rolebinding/policybased", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/secret", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/secret/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/registry/service/allocator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service/allocator/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service/ipallocator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service/ipallocator/controller", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service/portallocator", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/service/portallocator/controller", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/serviceaccount", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/serviceaccount/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresource", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresource/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/streaming", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontext", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + 
"Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/ssh", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd/metrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd3", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage/storagebackend", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/storage/storagebackend/factory", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/types", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/ui", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/ui/data/swagger", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/bandwidth", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/cache", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/chmod", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/chown", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/configz", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/crypto", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/dbus", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/deployment", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/diff", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/env", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/errors", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/exec", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flock", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flowcontrol", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flushwriter", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/framer", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/homedir", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/httpstream", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/httpstream/spdy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/integer", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/intstr", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", 
- "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/iptables", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/json", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/keymutex", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/limitwriter", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/metrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/oom", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/pod", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/procfs", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/proxy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rand", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/replicaset", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/runtime", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/selinux", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sets", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strategicpatch", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strings", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/system", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/validation/field", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/wait", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/util/workqueue", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/wsstream", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/yaml", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/aws_ebs", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_file", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cephfs", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cinder", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/configmap", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + 
"Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/downwardapi", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/empty_dir", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/fc", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flexvolume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flocker", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/gce_pd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/git_repo", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/glusterfs", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/host_path", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/volume/iscsi", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/nfs", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/rbd", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/secret", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/types", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/volumehelper", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/vsphere_volume", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + 
"Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/watch", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/pkg/watch/versioned", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/admit", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/deny", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/exec", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/initialresources", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/limitranger", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", - "Comment": "v1.3.3", - 
"Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/keystone", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/passwordfile", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/basicauth", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/union", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/x509", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/gcp", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/oidc", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": 
"dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/factory", - "Comment": "v1.3.3", - "Rev": 
"c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/metrics", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/webhook", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/json", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/reflect", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/third_party/golang/expansion", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" }, { "ImportPath": "k8s.io/kubernetes/third_party/golang/netutil", - "Comment": "v1.3.3", - "Rev": "c6411395e09da356c608896d3d9725acab821418" + "Comment": "v1.3.4", + "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" } ] } diff --git a/docs/minikube_start.md b/docs/minikube_start.md index 10a45f2ce2..60f31b93bc 100644 --- a/docs/minikube_start.md +++ b/docs/minikube_start.md @@ -20,7 +20,7 @@ minikube start --docker-env=[]: Environment variables to pass to the Docker daemon. 
(format: key=value) --insecure-registry=[]: Insecure Docker registries to pass to the Docker daemon --iso-url="https://storage.googleapis.com/minikube/minikube-0.5.iso": Location of the minikube iso - --kubernetes-version="v1.3.3": The kubernetes version that the minikube VM will (ex: v1.2.3) + --kubernetes-version="v1.3.4": The kubernetes version that the minikube VM will (ex: v1.2.3) OR a URI which contains a localkube binary (ex: https://storage.googleapis.com/minikube/k8sReleases/v1.3.0/localkube-linux-amd64) --memory=1024: Amount of RAM allocated to the minikube VM --vm-driver="virtualbox": VM driver is one of: [virtualbox vmwarefusion kvm xhyve] diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go index 9699ac7dde..08b9b91876 100644 --- a/vendor/github.com/docker/engine-api/client/container_logs.go +++ b/vendor/github.com/docker/engine-api/client/container_logs.go @@ -35,6 +35,10 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options query.Set("timestamps", "1") } + if options.Details { + query.Set("details", "1") + } + if options.Follow { query.Set("follow", "1") } diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go index 84ee19c309..72f55fdc01 100644 --- a/vendor/github.com/docker/engine-api/client/image_load.go +++ b/vendor/github.com/docker/engine-api/client/image_load.go @@ -10,8 +10,8 @@ import ( ) // ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser returned by -// this function. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. 
func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { v := url.Values{} v.Set("quiet", "0") diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go index 0584f00bd4..e2c49ec52b 100644 --- a/vendor/github.com/docker/engine-api/client/image_pull.go +++ b/vendor/github.com/docker/engine-api/client/image_pull.go @@ -27,12 +27,12 @@ func (cli *Client) ImagePull(ctx context.Context, ref string, options types.Imag query := url.Values{} query.Set("fromImage", repository) - if tag != "" { + if tag != "" && !options.All { query.Set("tag", tag) } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized { + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go index 8134f8018c..9c837a76d1 100644 --- a/vendor/github.com/docker/engine-api/client/image_push.go +++ b/vendor/github.com/docker/engine-api/client/image_push.go @@ -10,7 +10,6 @@ import ( distreference "github.com/docker/distribution/reference" "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" ) // ImagePush requests the docker host to push an image to a remote registry. 
@@ -27,7 +26,10 @@ func (cli *Client) ImagePush(ctx context.Context, ref string, options types.Imag return nil, errors.New("cannot push a digest reference") } - tag := reference.GetTagFromNamedRef(distributionRef) + var tag = "" + if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } query := url.Values{} query.Set("tag", tag) diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go index 3528bda6bd..571ba3df36 100644 --- a/vendor/github.com/docker/engine-api/client/image_search.go +++ b/vendor/github.com/docker/engine-api/client/image_search.go @@ -6,6 +6,7 @@ import ( "net/url" "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/registry" "golang.org/x/net/context" ) @@ -17,6 +18,14 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I query := url.Values{} query.Set("term", term) + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { newAuthHeader, privilegeErr := options.PrivilegeFunc() diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go index a345341490..fa3b2cfb45 100644 --- a/vendor/github.com/docker/engine-api/types/client.go +++ b/vendor/github.com/docker/engine-api/types/client.go @@ -57,6 +57,7 @@ type ContainerLogsOptions struct { Timestamps bool Follow bool Tail string + Details bool } // ContainerRemoveOptions holds parameters to remove containers. @@ -172,12 +173,14 @@ type ImageListOptions struct { // ImageLoadResponse returns information to the client about a load process. 
type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak Body io.ReadCloser JSON bool } // ImagePullOptions holds information to pull images. type ImagePullOptions struct { + All bool RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry PrivilegeFunc RequestPrivilegeFunc } @@ -203,6 +206,7 @@ type ImageRemoveOptions struct { type ImageSearchOptions struct { RegistryAuth string PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args } // ImageTagOptions holds parameters to tag an image diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go index 39f6a22516..2446c1904d 100644 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ b/vendor/github.com/docker/engine-api/types/container/host_config.go @@ -136,30 +136,49 @@ func (n UTSMode) Valid() bool { return true } -// PidMode represents the pid stack of the container. +// PidMode represents the pid namespace of the container. type PidMode string -// IsPrivate indicates whether the container uses its private pid stack. +// IsPrivate indicates whether the container uses its own new pid namespace. func (n PidMode) IsPrivate() bool { - return !(n.IsHost()) + return !(n.IsHost() || n.IsContainer()) } -// IsHost indicates whether the container uses the host's pid stack. +// IsHost indicates whether the container uses the host's pid namespace. func (n PidMode) IsHost() bool { return n == "host" } -// Valid indicates whether the pid stack is valid. +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. 
func (n PidMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } default: return false } return true } +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + // DeviceMapping represents the device mapping between the host and the container. type DeviceMapping struct { PathOnHost string diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/github.com/docker/engine-api/types/reference/image_reference.go index 7420158206..be9cf8ebed 100644 --- a/vendor/github.com/docker/engine-api/types/reference/image_reference.go +++ b/vendor/github.com/docker/engine-api/types/reference/image_reference.go @@ -27,6 +27,8 @@ func GetTagFromNamedRef(ref distreference.Named) string { tag = x.Digest().String() case distreference.NamedTagged: tag = x.Tag() + default: + tag = "latest" } return tag } diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go index 8a6fe70ea7..d2aca6f024 100644 --- a/vendor/github.com/docker/engine-api/types/registry/registry.go +++ b/vendor/github.com/docker/engine-api/types/registry/registry.go @@ -78,12 +78,10 @@ type IndexInfo struct { type SearchResult struct { // StarCount indicates the number of stars this repository has StarCount int `json:"star_count"` - // IsOfficial indicates whether the result is an official repository or not + // IsOfficial is true if the result is from an official repository. 
IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` - // IsTrusted indicates whether the result is trusted - IsTrusted bool `json:"is_trusted"` // IsAutomated indicates whether the result is automated IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go index 406b561a6e..cb2dc9ac9d 100644 --- a/vendor/github.com/docker/engine-api/types/types.go +++ b/vendor/github.com/docker/engine-api/types/types.go @@ -395,6 +395,7 @@ type Volume struct { Mountpoint string // Mountpoint is the location on disk of the volume Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume Labels map[string]string // Labels is metadata specific to the volume + Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) } // VolumesListResponse contains the response for the remote API: diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go index 90009fb9a0..247db27dd8 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go @@ -56,7 +56,6 @@ import ( "k8s.io/kubernetes/pkg/controller/job" namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace" nodecontroller "k8s.io/kubernetes/pkg/controller/node" - persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/persistentvolume" petset "k8s.io/kubernetes/pkg/controller/petset" "k8s.io/kubernetes/pkg/controller/podautoscaler" "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" @@ -66,7 +65,8 @@ import ( routecontroller "k8s.io/kubernetes/pkg/controller/route" 
servicecontroller "k8s.io/kubernetes/pkg/controller/service" serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount" - "k8s.io/kubernetes/pkg/controller/volume" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach" + persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" "k8s.io/kubernetes/pkg/healthz" quotainstall "k8s.io/kubernetes/pkg/quota/install" "k8s.io/kubernetes/pkg/serviceaccount" @@ -408,7 +408,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) attachDetachController, attachDetachControllerErr := - volume.NewAttachDetachController( + attachdetach.NewAttachDetachController( clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")), podInformer, nodeInformer, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go index 3002f98842..57c1db9361 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go @@ -25,6 +25,7 @@ import ( apierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/auth/user" + "k8s.io/kubernetes/pkg/serviceaccount" utilerrors "k8s.io/kubernetes/pkg/util/errors" ) @@ -201,8 +202,7 @@ func appliesToUser(user user.Info, subject rbac.Subject) (bool, error) { if subject.Namespace == "" { return false, fmt.Errorf("subject of kind service account without specified namespace") } - // TODO(ericchiang): Is there a better way of matching a service account name? 
- return "system:serviceaccount:"+subject.Name+":"+subject.Namespace == user.GetName(), nil + return serviceaccount.MakeUsername(subject.Namespace, subject.Name) == user.GetName(), nil default: return false, fmt.Errorf("unknown subject kind: %s", subject.Kind) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index df406e0645..a2f9836f30 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -51,42 +51,54 @@ import ( "k8s.io/kubernetes/pkg/volume" ) +// ProviderName is the name of this cloud provider. const ProviderName = "aws" -// The tag name we use to differentiate multiple logically independent clusters running in the same AZ +// TagNameKubernetesCluster is the tag name we use to differentiate multiple +// logically independent clusters running in the same AZ const TagNameKubernetesCluster = "KubernetesCluster" -// The tag name we use to differentiate multiple services. Used currently for ELBs only. +// TagNameKubernetesService is the tag name we use to differentiate multiple +// services. Used currently for ELBs only. const TagNameKubernetesService = "kubernetes.io/service-name" -// The tag name used on a subnet to designate that it should be used for internal ELBs +// TagNameSubnetInternalELB is the tag name used on a subnet to designate that +// it should be used for internal ELBs const TagNameSubnetInternalELB = "kubernetes.io/role/internal-elb" -// The tag name used on a subnet to designate that it should be used for internet ELBs +// TagNameSubnetPublicELB is the tag name used on a subnet to designate that +// it should be used for internet ELBs const TagNameSubnetPublicELB = "kubernetes.io/role/elb" -// Annotation used on the service to indicate that we want an internal ELB. 
+// ServiceAnnotationLoadBalancerInternal is the annotation used on the service +// to indicate that we want an internal ELB. // Currently we accept only the value "0.0.0.0/0" - other values are an error. // This lets us define more advanced semantics in future. const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/aws-load-balancer-internal" -// Annotation used on the service to enable the proxy protocol on an ELB. Right now we only -// accept the value "*" which means enable the proxy protocol on all ELB backends. In the -// future we could adjust this to allow setting the proxy protocol only on certain backends. +// ServiceAnnotationLoadBalancerProxyProtocol is the annotation used on the +// service to enable the proxy protocol on an ELB. Right now we only accept the +// value "*" which means enable the proxy protocol on all ELB backends. In the +// future we could adjust this to allow setting the proxy protocol only on +// certain backends. const ServiceAnnotationLoadBalancerProxyProtocol = "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol" -// Service annotation requesting a secure listener. Value is a valid certificate ARN. +// ServiceAnnotationLoadBalancerCertificate is the annotation used on the +// service to request a secure listener. Value is a valid certificate ARN. // For more, see http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html // CertARN is an IAM or CM certificate ARN, e.g. arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 const ServiceAnnotationLoadBalancerCertificate = "service.beta.kubernetes.io/aws-load-balancer-ssl-cert" -// Service annotation specifying a comma-separated list of ports that will use SSL/HTTPS +// ServiceAnnotationLoadBalancerSSLPorts is the annotation used on the service +// to specify a comma-separated list of ports that will use SSL/HTTPS // listeners. Defaults to '*' (all). 
const ServiceAnnotationLoadBalancerSSLPorts = "service.beta.kubernetes.io/aws-load-balancer-ssl-ports" -// Service annotation specifying the protocol spoken by the backend (pod) behind a secure listener. +// ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service +// to specify the protocol spoken by the backend (pod) behind a secure listener. // Only inspected when `aws-load-balancer-ssl-cert` is used. -// If `http` (default) or `https`, an HTTPS listener that terminates the connection and parses headers is created. +// If `http` (default) or `https`, an HTTPS listener that terminates the +// connection and parses headers is created. // If set to `ssl` or `tcp`, a "raw" SSL listener is used. const ServiceAnnotationLoadBalancerBEProtocol = "service.beta.kubernetes.io/aws-load-balancer-backend-protocol" @@ -98,36 +110,36 @@ var backendProtocolMapping = map[string]string{ "tcp": "ssl", } -// We sometimes read to see if something exists; then try to create it if we didn't find it +// MaxReadThenCreateRetries sets the maximum number of attempts we will make when +// we read to see if something exists and then try to create it if we didn't find it. // This can fail once in a consistent system if done in parallel // In an eventually consistent system, it could fail unboundedly -// MaxReadThenCreateRetries sets the maximum number of attempts we will make const MaxReadThenCreateRetries = 30 -// Default volume type for newly created Volumes +// DefaultVolumeType specifies which storage to use for newly created Volumes // TODO: Remove when user/admin can configure volume types and thus we don't // need hardcoded defaults. const DefaultVolumeType = "gp2" -// Amazon recommends having no more that 40 volumes attached to an instance, -// and at least one of those is for the system root volume. +// DefaultMaxEBSVolumes is the limit for volumes attached to an instance. +// Amazon recommends no more than 40; the system root volume uses at least one. 
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits const DefaultMaxEBSVolumes = 39 // Used to call aws_credentials.Init() just once var once sync.Once -// Abstraction over AWS, to allow mocking/other implementations -type AWSServices interface { +// Services is an abstraction over AWS, to allow mocking/other implementations +type Services interface { Compute(region string) (EC2, error) LoadBalancing(region string) (ELB, error) Autoscaling(region string) (ASG, error) Metadata() (EC2Metadata, error) } -// TODO: Should we rename this to AWS (EBS & ELB are not technically part of EC2) -// Abstraction over EC2, to allow mocking/other implementations +// EC2 is an abstraction over AWS', to allow mocking/other implementations // Note that the DescribeX functions return a list, so callers don't need to deal with paging +// TODO: Should we rename this to AWS (EBS & ELB are not technically part of EC2) type EC2 interface { // Query EC2 for instances matching the filter DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) @@ -162,7 +174,7 @@ type EC2 interface { ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) } -// This is a simple pass-through of the ELB client interface, which allows for testing +// ELB is a simple pass-through of AWS' ELB client interface, which allows for testing type ELB interface { CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) @@ -183,18 +195,20 @@ type ELB interface { ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) } -// This is a simple pass-through of the Autoscaling client interface, which allows for testing +// ASG is a simple pass-through of the Autoscaling client interface, which +// allows for testing. 
type ASG interface { UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) } -// Abstraction over the AWS metadata service +// EC2Metadata is an abstraction over the AWS metadata service. type EC2Metadata interface { // Query the EC2 metadata service (used to discover instance-id etc) GetMetadata(path string) (string, error) } +// VolumeOptions specifies capacity and tags for a volume. type VolumeOptions struct { CapacityGB int Tags map[string]string @@ -246,13 +260,13 @@ type InstanceGroupInfo interface { CurrentSize() (int, error) } -// AWSCloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services. -type AWSCloud struct { +// Cloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services. +type Cloud struct { ec2 EC2 elb ELB asg ASG metadata EC2Metadata - cfg *AWSCloudConfig + cfg *CloudConfig region string vpcID string @@ -267,9 +281,10 @@ type AWSCloud struct { lastInstancesByNodeNames []*ec2.Instance } -var _ Volumes = &AWSCloud{} +var _ Volumes = &Cloud{} -type AWSCloudConfig struct { +// CloudConfig wraps the settings for the AWS cloud provider. +type CloudConfig struct { Global struct { // TODO: Is there any use for this? We can get it from the instance metadata service // Maybe if we're not running on AWS, e.g. bootstrap; for now it is not very useful @@ -421,22 +436,24 @@ func newEc2Filter(name string, value string) *ec2.Filter { return filter } -func (self *AWSCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { +// AddSSHKeyToAllInstances is currently not implemented. 
+func (c *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { return errors.New("unimplemented") } -func (c *AWSCloud) CurrentNodeName(hostname string) (string, error) { +// CurrentNodeName returns the name of the current node +func (c *Cloud) CurrentNodeName(hostname string) (string, error) { return c.selfAWSInstance.nodeName, nil } // Implementation of EC2.Instances -func (self *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) { +func (s *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) { // Instances are paged results := []*ec2.Instance{} var nextToken *string for { - response, err := self.ec2.DescribeInstances(request) + response, err := s.ec2.DescribeInstances(request) if err != nil { return nil, fmt.Errorf("error listing AWS instances: %v", err) } @@ -571,8 +588,8 @@ func init() { } // readAWSCloudConfig reads an instance of AWSCloudConfig from config reader. -func readAWSCloudConfig(config io.Reader, metadata EC2Metadata) (*AWSCloudConfig, error) { - var cfg AWSCloudConfig +func readAWSCloudConfig(config io.Reader, metadata EC2Metadata) (*CloudConfig, error) { + var cfg CloudConfig var err error if config != nil { @@ -627,7 +644,7 @@ func azToRegion(az string) (string, error) { // newAWSCloud creates a new instance of AWSCloud. 
// AWSProvider and instanceId are primarily for tests -func newAWSCloud(config io.Reader, awsServices AWSServices) (*AWSCloud, error) { +func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) { metadata, err := awsServices.Metadata() if err != nil { return nil, fmt.Errorf("error creating AWS metadata client: %v", err) @@ -667,7 +684,7 @@ func newAWSCloud(config io.Reader, awsServices AWSServices) (*AWSCloud, error) { return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err) } - awsCloud := &AWSCloud{ + awsCloud := &Cloud{ ec2: ec2, elb: elb, asg: asg, @@ -719,42 +736,43 @@ func newAWSCloud(config io.Reader, awsServices AWSServices) (*AWSCloud, error) { return awsCloud, nil } -func (aws *AWSCloud) Clusters() (cloudprovider.Clusters, bool) { +// Clusters returns the list of clusters. +func (c *Cloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false } // ProviderName returns the cloud provider ID. -func (aws *AWSCloud) ProviderName() string { +func (c *Cloud) ProviderName() string { return ProviderName } // ScrubDNS filters DNS settings for pods. -func (aws *AWSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { +func (c *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { return nameservers, searches } // LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services. -func (s *AWSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return s, true +func (c *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { + return c, true } // Instances returns an implementation of Instances for Amazon Web Services. -func (aws *AWSCloud) Instances() (cloudprovider.Instances, bool) { - return aws, true +func (c *Cloud) Instances() (cloudprovider.Instances, bool) { + return c, true } // Zones returns an implementation of Zones for Amazon Web Services. 
-func (aws *AWSCloud) Zones() (cloudprovider.Zones, bool) { - return aws, true +func (c *Cloud) Zones() (cloudprovider.Zones, bool) { + return c, true } // Routes returns an implementation of Routes for Amazon Web Services. -func (aws *AWSCloud) Routes() (cloudprovider.Routes, bool) { - return aws, true +func (c *Cloud) Routes() (cloudprovider.Routes, bool) { + return c, true } // NodeAddresses is an implementation of Instances.NodeAddresses. -func (c *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { +func (c *Cloud) NodeAddresses(name string) ([]api.NodeAddress, error) { if c.selfAWSInstance.nodeName == name || len(name) == 0 { addresses := []api.NodeAddress{} @@ -779,7 +797,7 @@ func (c *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { } instance, err := c.getInstanceByNodeName(name) if err != nil { - return nil, err + return nil, fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err) } addresses := []api.NodeAddress{} @@ -810,61 +828,58 @@ func (c *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { } // ExternalID returns the cloud provider ID of the specified instance (deprecated). 
-func (c *AWSCloud) ExternalID(name string) (string, error) { +func (c *Cloud) ExternalID(name string) (string, error) { if c.selfAWSInstance.nodeName == name { // We assume that if this is run on the instance itself, the instance exists and is alive return c.selfAWSInstance.awsID, nil - } else { - // We must verify that the instance still exists - // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) - instance, err := c.findInstanceByNodeName(name) - if err != nil { - return "", err - } - if instance == nil { - return "", cloudprovider.InstanceNotFound - } - return orEmpty(instance.InstanceId), nil } + // We must verify that the instance still exists + // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) + instance, err := c.findInstanceByNodeName(name) + if err != nil { + return "", err + } + if instance == nil { + return "", cloudprovider.InstanceNotFound + } + return orEmpty(instance.InstanceId), nil } // InstanceID returns the cloud provider ID of the specified instance. -func (c *AWSCloud) InstanceID(name string) (string, error) { +func (c *Cloud) InstanceID(name string) (string, error) { // In the future it is possible to also return an endpoint as: // // if c.selfAWSInstance.nodeName == name { return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil - } else { - inst, err := c.getInstanceByNodeName(name) - if err != nil { - return "", err - } - return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil } + inst, err := c.getInstanceByNodeName(name) + if err != nil { + return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err) + } + return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil } // InstanceType returns the type of the specified instance. 
-func (c *AWSCloud) InstanceType(name string) (string, error) { +func (c *Cloud) InstanceType(name string) (string, error) { if c.selfAWSInstance.nodeName == name { return c.selfAWSInstance.instanceType, nil - } else { - inst, err := c.getInstanceByNodeName(name) - if err != nil { - return "", err - } - return orEmpty(inst.InstanceType), nil } + inst, err := c.getInstanceByNodeName(name) + if err != nil { + return "", fmt.Errorf("getInstanceByNodeName failed for %q with %v", name, err) + } + return orEmpty(inst.InstanceType), nil } // Return a list of instances matching regex string. -func (s *AWSCloud) getInstancesByRegex(regex string) ([]string, error) { +func (c *Cloud) getInstancesByRegex(regex string) ([]string, error) { filters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")} - filters = s.addFilters(filters) + filters = c.addFilters(filters) request := &ec2.DescribeInstancesInput{ Filters: filters, } - instances, err := s.ec2.DescribeInstances(request) + instances, err := c.ec2.DescribeInstances(request) if err != nil { return []string{}, err } @@ -910,14 +925,14 @@ func (s *AWSCloud) getInstancesByRegex(regex string) ([]string, error) { } // List is an implementation of Instances.List. -func (aws *AWSCloud) List(filter string) ([]string, error) { +func (c *Cloud) List(filter string) ([]string, error) { // TODO: Should really use tag query. No need to go regexp. - return aws.getInstancesByRegex(filter) + return c.getInstancesByRegex(filter) } // getAllZones retrieves a list of all the zones in which nodes are running // It currently involves querying all instances -func (c *AWSCloud) getAllZones() (sets.String, error) { +func (c *Cloud) getAllZones() (sets.String, error) { // We don't currently cache this; it is currently used only in volume // creation which is expected to be a comparatively rare occurence. 
@@ -959,7 +974,7 @@ func (c *AWSCloud) getAllZones() (sets.String, error) { } // GetZone implements Zones.GetZone -func (c *AWSCloud) GetZone() (cloudprovider.Zone, error) { +func (c *Cloud) GetZone() (cloudprovider.Zone, error) { return cloudprovider.Zone{ FailureDomain: c.selfAWSInstance.availabilityZone, Region: c.region, @@ -1026,28 +1041,28 @@ func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance { } // Gets the awsInstanceType that models the instance type of this instance -func (self *awsInstance) getInstanceType() *awsInstanceType { +func (i *awsInstance) getInstanceType() *awsInstanceType { // TODO: Make this real awsInstanceType := &awsInstanceType{} return awsInstanceType } // Gets the full information about this instance from the EC2 API -func (self *awsInstance) describeInstance() (*ec2.Instance, error) { - instanceID := self.awsID +func (i *awsInstance) describeInstance() (*ec2.Instance, error) { + instanceID := i.awsID request := &ec2.DescribeInstancesInput{ InstanceIds: []*string{&instanceID}, } - instances, err := self.ec2.DescribeInstances(request) + instances, err := i.ec2.DescribeInstances(request) if err != nil { return nil, err } if len(instances) == 0 { - return nil, fmt.Errorf("no instances found for instance: %s", self.awsID) + return nil, fmt.Errorf("no instances found for instance: %s", i.awsID) } if len(instances) > 1 { - return nil, fmt.Errorf("multiple instances found for instance: %s", self.awsID) + return nil, fmt.Errorf("multiple instances found for instance: %s", i.awsID) } return instances[0], nil } @@ -1055,19 +1070,19 @@ func (self *awsInstance) describeInstance() (*ec2.Instance, error) { // Gets the mountDevice already assigned to the volume, or assigns an unused mountDevice. // If the volume is already assigned, this will return the existing mountDevice with alreadyAttached=true. 
// Otherwise the mountDevice is assigned by finding the first available mountDevice, and it is returned with alreadyAttached=false. -func (self *awsInstance) getMountDevice(volumeID string, assign bool) (assigned mountDevice, alreadyAttached bool, err error) { - instanceType := self.getInstanceType() +func (i *awsInstance) getMountDevice(volumeID string, assign bool) (assigned mountDevice, alreadyAttached bool, err error) { + instanceType := i.getInstanceType() if instanceType == nil { - return "", false, fmt.Errorf("could not get instance type for instance: %s", self.awsID) + return "", false, fmt.Errorf("could not get instance type for instance: %s", i.awsID) } // We lock to prevent concurrent mounts from conflicting // We may still conflict if someone calls the API concurrently, // but the AWS API will then fail one of the two attach operations - self.mutex.Lock() - defer self.mutex.Unlock() + i.mutex.Lock() + defer i.mutex.Unlock() - info, err := self.describeInstance() + info, err := i.describeInstance() if err != nil { return "", false, err } @@ -1086,7 +1101,7 @@ func (self *awsInstance) getMountDevice(volumeID string, assign bool) (assigned deviceMappings[mountDevice(name)] = aws.StringValue(blockDevice.Ebs.VolumeId) } - for mountDevice, volume := range self.attaching { + for mountDevice, volume := range i.attaching { deviceMappings[mountDevice] = volume } @@ -1118,20 +1133,20 @@ func (self *awsInstance) getMountDevice(volumeID string, assign bool) (assigned if chosen == "" { glog.Warningf("Could not assign a mount device (all in use?). 
mappings=%v", deviceMappings) - return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", self.nodeName) + return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", i.nodeName) } - self.attaching[chosen] = volumeID + i.attaching[chosen] = volumeID glog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) return chosen, false, nil } -func (self *awsInstance) endAttaching(volumeID string, mountDevice mountDevice) { - self.mutex.Lock() - defer self.mutex.Unlock() +func (i *awsInstance) endAttaching(volumeID string, mountDevice mountDevice) { + i.mutex.Lock() + defer i.mutex.Unlock() - existingVolumeID, found := self.attaching[mountDevice] + existingVolumeID, found := i.attaching[mountDevice] if !found { glog.Errorf("endAttaching on non-allocated device") return @@ -1141,7 +1156,7 @@ func (self *awsInstance) endAttaching(volumeID string, mountDevice mountDevice) return } glog.V(2).Infof("Releasing mount device mapping: %s -> volume %s", mountDevice, volumeID) - delete(self.attaching, mountDevice) + delete(i.attaching, mountDevice) } type awsDisk struct { @@ -1153,7 +1168,7 @@ type awsDisk struct { awsID string } -func newAWSDisk(aws *AWSCloud, name string) (*awsDisk, error) { +func newAWSDisk(aws *Cloud, name string) (*awsDisk, error) { // name looks like aws://availability-zone/id // The original idea of the URL-style name was to put the AZ into the @@ -1191,35 +1206,35 @@ func newAWSDisk(aws *AWSCloud, name string) (*awsDisk, error) { } // Gets the full information about this volume from the EC2 API -func (self *awsDisk) describeVolume() (*ec2.Volume, error) { - volumeID := self.awsID +func (d *awsDisk) describeVolume() (*ec2.Volume, error) { + volumeID := d.awsID request := &ec2.DescribeVolumesInput{ VolumeIds: []*string{&volumeID}, } - volumes, err := self.ec2.DescribeVolumes(request) + volumes, err := d.ec2.DescribeVolumes(request) if err != nil { return nil, fmt.Errorf("error querying ec2 for volume info: %v", err) 
} if len(volumes) == 0 { - return nil, fmt.Errorf("no volumes found for volume: %s", self.awsID) + return nil, fmt.Errorf("no volumes found for volume: %s", d.awsID) } if len(volumes) > 1 { - return nil, fmt.Errorf("multiple volumes found for volume: %s", self.awsID) + return nil, fmt.Errorf("multiple volumes found for volume: %s", d.awsID) } return volumes[0], nil } // waitForAttachmentStatus polls until the attachment status is the expected value // TODO(justinsb): return (bool, error) -func (self *awsDisk) waitForAttachmentStatus(status string) error { +func (d *awsDisk) waitForAttachmentStatus(status string) error { // TODO: There may be a faster way to get this when we're attaching locally attempt := 0 maxAttempts := 60 for { - info, err := self.describeVolume() + info, err := d.describeVolume() if err != nil { return err } @@ -1258,9 +1273,9 @@ func (self *awsDisk) waitForAttachmentStatus(status string) error { } // Deletes the EBS disk -func (self *awsDisk) deleteVolume() (bool, error) { - request := &ec2.DeleteVolumeInput{VolumeId: aws.String(self.awsID)} - _, err := self.ec2.DeleteVolume(request) +func (d *awsDisk) deleteVolume() (bool, error) { + request := &ec2.DeleteVolumeInput{VolumeId: aws.String(d.awsID)} + _, err := d.ec2.DeleteVolume(request) if err != nil { if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "InvalidVolume.NotFound" { @@ -1274,11 +1289,11 @@ func (self *awsDisk) deleteVolume() (bool, error) { // Builds the awsInstance for the EC2 instance on which we are running. 
// This is called when the AWSCloud is initialized, and should not be called otherwise (because the awsInstance for the local instance is a singleton with drive mapping state) -func (c *AWSCloud) buildSelfAWSInstance() (*awsInstance, error) { +func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) { if c.selfAWSInstance != nil { panic("do not call buildSelfAWSInstance directly") } - instanceId, err := c.metadata.GetMetadata("instance-id") + instanceID, err := c.metadata.GetMetadata("instance-id") if err != nil { return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %v", err) } @@ -1291,22 +1306,22 @@ func (c *AWSCloud) buildSelfAWSInstance() (*awsInstance, error) { // information from the instance returned by the EC2 API - it is a // single API call to get all the information, and it means we don't // have two code paths. - instance, err := c.getInstanceByID(instanceId) + instance, err := c.getInstanceByID(instanceID) if err != nil { - return nil, fmt.Errorf("error finding instance %s: %v", instanceId, err) + return nil, fmt.Errorf("error finding instance %s: %v", instanceID, err) } return newAWSInstance(c.ec2, instance), nil } // Gets the awsInstance with node-name nodeName, or the 'self' instance if nodeName == "" -func (c *AWSCloud) getAwsInstance(nodeName string) (*awsInstance, error) { +func (c *Cloud) getAwsInstance(nodeName string) (*awsInstance, error) { var awsInstance *awsInstance if nodeName == "" { awsInstance = c.selfAWSInstance } else { instance, err := c.getInstanceByNodeName(nodeName) if err != nil { - return nil, fmt.Errorf("error finding instance %s: %v", nodeName, err) + return nil, err } awsInstance = newAWSInstance(c.ec2, instance) @@ -1315,8 +1330,8 @@ func (c *AWSCloud) getAwsInstance(nodeName string) (*awsInstance, error) { return awsInstance, nil } -// Implements Volumes.AttachDisk -func (c *AWSCloud) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) { +// AttachDisk implements 
Volumes.AttachDisk +func (c *Cloud) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) { disk, err := newAWSDisk(c, diskName) if err != nil { return "", err @@ -1324,7 +1339,7 @@ func (c *AWSCloud) AttachDisk(diskName string, instanceName string, readOnly boo awsInstance, err := c.getAwsInstance(instanceName) if err != nil { - return "", err + return "", fmt.Errorf("error finding instance %s: %v", instanceName, err) } if readOnly { @@ -1380,15 +1395,24 @@ func (c *AWSCloud) AttachDisk(diskName string, instanceName string, readOnly boo return hostDevice, nil } -// Implements Volumes.DetachDisk -func (aws *AWSCloud) DetachDisk(diskName string, instanceName string) (string, error) { - disk, err := newAWSDisk(aws, diskName) +// DetachDisk implements Volumes.DetachDisk +func (c *Cloud) DetachDisk(diskName string, instanceName string) (string, error) { + disk, err := newAWSDisk(c, diskName) if err != nil { return "", err } - awsInstance, err := aws.getAwsInstance(instanceName) + awsInstance, err := c.getAwsInstance(instanceName) if err != nil { + if err == cloudprovider.InstanceNotFound { + // If instance no longer exists, safe to assume volume is not attached. + glog.Warningf( + "Instance %q does not exist. 
DetachDisk will assume disk %q is not attached to it.", + instanceName, + diskName) + return "", nil + } + return "", err } @@ -1407,7 +1431,7 @@ func (aws *AWSCloud) DetachDisk(diskName string, instanceName string) (string, e VolumeId: &disk.awsID, } - response, err := aws.ec2.DetachVolume(&request) + response, err := c.ec2.DetachVolume(&request) if err != nil { return "", fmt.Errorf("error detaching EBS volume: %v", err) } @@ -1429,8 +1453,8 @@ func (aws *AWSCloud) DetachDisk(diskName string, instanceName string) (string, e } // CreateDisk implements Volumes.CreateDisk -func (s *AWSCloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { - allZones, err := s.getAllZones() +func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { + allZones, err := c.getAllZones() if err != nil { return "", fmt.Errorf("error querying for all zones: %v", err) } @@ -1443,7 +1467,7 @@ func (s *AWSCloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { volSize := int64(volumeOptions.CapacityGB) request.Size = &volSize request.VolumeType = aws.String(DefaultVolumeType) - response, err := s.ec2.CreateVolume(request) + response, err := c.ec2.CreateVolume(request) if err != nil { return "", err } @@ -1459,14 +1483,14 @@ func (s *AWSCloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { tags[k] = v } - if s.getClusterName() != "" { - tags[TagNameKubernetesCluster] = s.getClusterName() + if c.getClusterName() != "" { + tags[TagNameKubernetesCluster] = c.getClusterName() } if len(tags) != 0 { - if err := s.createTags(awsID, tags); err != nil { + if err := c.createTags(awsID, tags); err != nil { // delete the volume and hope it succeeds - _, delerr := s.DeleteDisk(volumeName) + _, delerr := c.DeleteDisk(volumeName) if delerr != nil { // delete did not succeed, we have a stray volume! 
return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %v", volumeName, delerr) @@ -1477,8 +1501,8 @@ func (s *AWSCloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) { return volumeName, nil } -// Implements Volumes.DeleteDisk -func (c *AWSCloud) DeleteDisk(volumeName string) (bool, error) { +// DeleteDisk implements Volumes.DeleteDisk +func (c *Cloud) DeleteDisk(volumeName string) (bool, error) { awsDisk, err := newAWSDisk(c, volumeName) if err != nil { return false, err @@ -1486,8 +1510,8 @@ func (c *AWSCloud) DeleteDisk(volumeName string) (bool, error) { return awsDisk.deleteVolume() } -// Implements Volumes.GetVolumeLabels -func (c *AWSCloud) GetVolumeLabels(volumeName string) (map[string]string, error) { +// GetVolumeLabels implements Volumes.GetVolumeLabels +func (c *Cloud) GetVolumeLabels(volumeName string) (map[string]string, error) { awsDisk, err := newAWSDisk(c, volumeName) if err != nil { return nil, err @@ -1512,8 +1536,8 @@ func (c *AWSCloud) GetVolumeLabels(volumeName string) (map[string]string, error) return labels, nil } -// Implement Volumes.GetDiskPath -func (c *AWSCloud) GetDiskPath(volumeName string) (string, error) { +// GetDiskPath implements Volumes.GetDiskPath +func (c *Cloud) GetDiskPath(volumeName string) (string, error) { awsDisk, err := newAWSDisk(c, volumeName) if err != nil { return "", err @@ -1528,10 +1552,19 @@ func (c *AWSCloud) GetDiskPath(volumeName string) (string, error) { return aws.StringValue(info.Attachments[0].Device), nil } -// Implement Volumes.DiskIsAttached -func (c *AWSCloud) DiskIsAttached(diskName, instanceID string) (bool, error) { +// DiskIsAttached implements Volumes.DiskIsAttached +func (c *Cloud) DiskIsAttached(diskName, instanceID string) (bool, error) { awsInstance, err := c.getAwsInstance(instanceID) if err != nil { + if err == cloudprovider.InstanceNotFound { + // If instance no longer exists, safe to assume volume is not attached. 
+ glog.Warningf( + "Instance %q does not exist. DiskIsAttached will assume disk %q is not attached to it.", + instanceID, + diskName) + return false, nil + } + return false, err } @@ -1549,11 +1582,11 @@ func (c *AWSCloud) DiskIsAttached(diskName, instanceID string) (bool, error) { } // Gets the current load balancer state -func (s *AWSCloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) { +func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) { request := &elb.DescribeLoadBalancersInput{} request.LoadBalancerNames = []*string{&name} - response, err := s.elb.DescribeLoadBalancers(request) + response, err := c.elb.DescribeLoadBalancers(request) if err != nil { if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "LoadBalancerNotFound" { @@ -1574,8 +1607,8 @@ func (s *AWSCloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescripti } // Retrieves instance's vpc id from metadata -func (self *AWSCloud) findVPCID() (string, error) { - macs, err := self.metadata.GetMetadata("network/interfaces/macs/") +func (c *Cloud) findVPCID() (string, error) { + macs, err := c.metadata.GetMetadata("network/interfaces/macs/") if err != nil { return "", fmt.Errorf("Could not list interfaces of the instance: %v", err) } @@ -1586,7 +1619,7 @@ func (self *AWSCloud) findVPCID() (string, error) { continue } url := fmt.Sprintf("network/interfaces/macs/%svpc-id", macPath) - vpcID, err := self.metadata.GetMetadata(url) + vpcID, err := c.metadata.GetMetadata(url) if err != nil { continue } @@ -1596,13 +1629,13 @@ func (self *AWSCloud) findVPCID() (string, error) { } // Retrieves the specified security group from the AWS API, or returns nil if not found -func (s *AWSCloud) findSecurityGroup(securityGroupId string) (*ec2.SecurityGroup, error) { +func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, error) { describeSecurityGroupsRequest := &ec2.DescribeSecurityGroupsInput{ - GroupIds: 
[]*string{&securityGroupId}, + GroupIds: []*string{&securityGroupID}, } // We don't apply our tag filters because we are retrieving by ID - groups, err := s.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest) + groups, err := c.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest) if err != nil { glog.Warningf("Error retrieving security group: %q", err) return nil, err @@ -1613,7 +1646,7 @@ func (s *AWSCloud) findSecurityGroup(securityGroupId string) (*ec2.SecurityGroup } if len(groups) != 1 { // This should not be possible - ids should be unique - return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupId) + return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupID) } group := groups[0] return group, nil @@ -1698,18 +1731,18 @@ func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) b // Makes sure the security group ingress is exactly the specified permissions // Returns true if and only if changes were made // The security group must already exist -func (s *AWSCloud) setSecurityGroupIngress(securityGroupId string, permissions IPPermissionSet) (bool, error) { - group, err := s.findSecurityGroup(securityGroupId) +func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPermissionSet) (bool, error) { + group, err := c.findSecurityGroup(securityGroupID) if err != nil { glog.Warning("Error retrieving security group", err) return false, err } if group == nil { - return false, fmt.Errorf("security group not found: %s", securityGroupId) + return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupId, group.IpPermissions) + glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) actual := NewIPPermissionSet(group.IpPermissions...) 
@@ -1740,23 +1773,23 @@ func (s *AWSCloud) setSecurityGroupIngress(securityGroupId string, permissions I // don't want to accidentally open more than intended while we're // applying changes. if add.Len() != 0 { - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupId, add.List()) + glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) request := &ec2.AuthorizeSecurityGroupIngressInput{} - request.GroupId = &securityGroupId + request.GroupId = &securityGroupID request.IpPermissions = add.List() - _, err = s.ec2.AuthorizeSecurityGroupIngress(request) + _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { return false, fmt.Errorf("error authorizing security group ingress: %v", err) } } if remove.Len() != 0 { - glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupId, remove.List()) + glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) request := &ec2.RevokeSecurityGroupIngressInput{} - request.GroupId = &securityGroupId + request.GroupId = &securityGroupID request.IpPermissions = remove.List() - _, err = s.ec2.RevokeSecurityGroupIngress(request) + _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { return false, fmt.Errorf("error revoking security group ingress: %v", err) } @@ -1768,18 +1801,18 @@ func (s *AWSCloud) setSecurityGroupIngress(securityGroupId string, permissions I // Makes sure the security group includes the specified permissions // Returns true if and only if changes were made // The security group must already exist -func (s *AWSCloud) addSecurityGroupIngress(securityGroupId string, addPermissions []*ec2.IpPermission) (bool, error) { - group, err := s.findSecurityGroup(securityGroupId) +func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions []*ec2.IpPermission) (bool, error) { + group, err := c.findSecurityGroup(securityGroupID) if err != nil { glog.Warningf("Error retrieving security group: %v", 
err) return false, err } if group == nil { - return false, fmt.Errorf("security group not found: %s", securityGroupId) + return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupId, group.IpPermissions) + glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) changes := []*ec2.IpPermission{} for _, addPermission := range addPermissions { @@ -1807,12 +1840,12 @@ func (s *AWSCloud) addSecurityGroupIngress(securityGroupId string, addPermission return false, nil } - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupId, changes) + glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) request := &ec2.AuthorizeSecurityGroupIngressInput{} - request.GroupId = &securityGroupId + request.GroupId = &securityGroupID request.IpPermissions = changes - _, err = s.ec2.AuthorizeSecurityGroupIngress(request) + _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { glog.Warning("Error authorizing security group ingress", err) return false, fmt.Errorf("error authorizing security group ingress: %v", err) @@ -1824,15 +1857,15 @@ func (s *AWSCloud) addSecurityGroupIngress(securityGroupId string, addPermission // Makes sure the security group no longer includes the specified permissions // Returns true if and only if changes were made // If the security group no longer exists, will return (false, nil) -func (s *AWSCloud) removeSecurityGroupIngress(securityGroupId string, removePermissions []*ec2.IpPermission) (bool, error) { - group, err := s.findSecurityGroup(securityGroupId) +func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermissions []*ec2.IpPermission) (bool, error) { + group, err := c.findSecurityGroup(securityGroupID) if err != nil { glog.Warningf("Error retrieving security group: %v", err) return false, err } if group == nil { - glog.Warning("Security group not 
found: ", securityGroupId) + glog.Warning("Security group not found: ", securityGroupID) return false, nil } @@ -1862,12 +1895,12 @@ func (s *AWSCloud) removeSecurityGroupIngress(securityGroupId string, removePerm return false, nil } - glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupId, changes) + glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) request := &ec2.RevokeSecurityGroupIngressInput{} - request.GroupId = &securityGroupId + request.GroupId = &securityGroupID request.IpPermissions = changes - _, err = s.ec2.RevokeSecurityGroupIngress(request) + _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { glog.Warningf("Error revoking security group ingress: %v", err) return false, err @@ -1879,14 +1912,14 @@ func (s *AWSCloud) removeSecurityGroupIngress(securityGroupId string, removePerm // Ensure that a resource has the correct tags // If it has no tags, we assume that this was a problem caused by an error in between creation and tagging, // and we add the tags. If it has a different cluster's tags, that is an error. 
-func (s *AWSCloud) ensureClusterTags(resourceID string, tags []*ec2.Tag) error { +func (c *Cloud) ensureClusterTags(resourceID string, tags []*ec2.Tag) error { actualTags := make(map[string]string) for _, tag := range tags { actualTags[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value) } addTags := make(map[string]string) - for k, expected := range s.filterTags { + for k, expected := range c.filterTags { actual := actualTags[k] if actual == expected { continue @@ -1899,7 +1932,7 @@ func (s *AWSCloud) ensureClusterTags(resourceID string, tags []*ec2.Tag) error { } } - if err := s.createTags(resourceID, addTags); err != nil { + if err := c.createTags(resourceID, addTags); err != nil { return fmt.Errorf("error adding missing tags to resource %q: %v", resourceID, err) } @@ -1909,7 +1942,7 @@ func (s *AWSCloud) ensureClusterTags(resourceID string, tags []*ec2.Tag) error { // Makes sure the security group exists. // For multi-cluster isolation, name must be globally unique, for example derived from the service UUID. // Returns the security group id or error -func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, error) { +func (c *Cloud) ensureSecurityGroup(name string, description string) (string, error) { groupID := "" attempt := 0 for { @@ -1918,7 +1951,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, request := &ec2.DescribeSecurityGroupsInput{} filters := []*ec2.Filter{ newEc2Filter("group-name", name), - newEc2Filter("vpc-id", s.vpcID), + newEc2Filter("vpc-id", c.vpcID), } // Note that we do _not_ add our tag filters; group-name + vpc-id is the EC2 primary key. // However, we do check that it matches our tags. 
@@ -1927,7 +1960,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, // This shouldn't happen because name is expected to be globally unique (UUID derived) request.Filters = filters - securityGroups, err := s.ec2.DescribeSecurityGroups(request) + securityGroups, err := c.ec2.DescribeSecurityGroups(request) if err != nil { return "", err } @@ -1936,7 +1969,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, if len(securityGroups) > 1 { glog.Warningf("Found multiple security groups with name: %q", name) } - err := s.ensureClusterTags(aws.StringValue(securityGroups[0].GroupId), securityGroups[0].Tags) + err := c.ensureClusterTags(aws.StringValue(securityGroups[0].GroupId), securityGroups[0].Tags) if err != nil { return "", err } @@ -1945,11 +1978,11 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, } createRequest := &ec2.CreateSecurityGroupInput{} - createRequest.VpcId = &s.vpcID + createRequest.VpcId = &c.vpcID createRequest.GroupName = &name createRequest.Description = &description - createResponse, err := s.ec2.CreateSecurityGroup(createRequest) + createResponse, err := c.ec2.CreateSecurityGroup(createRequest) if err != nil { ignore := false switch err := err.(type) { @@ -1973,7 +2006,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, return "", fmt.Errorf("created security group, but id was not returned: %s", name) } - err := s.createTags(groupID, s.filterTags) + err := c.createTags(groupID, c.filterTags) if err != nil { // If we retry, ensureClusterTags will recover from this - it // will add the missing tags. 
We could delete the security @@ -1987,7 +2020,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, // createTags calls EC2 CreateTags, but adds retry-on-failure logic // We retry mainly because if we create an object, we cannot tag it until it is "fully created" (eventual consistency) // The error code varies though (depending on what we are tagging), so we simply retry on all errors -func (s *AWSCloud) createTags(resourceID string, tags map[string]string) error { +func (c *Cloud) createTags(resourceID string, tags map[string]string) error { if tags == nil || len(tags) == 0 { return nil } @@ -2010,7 +2043,7 @@ func (s *AWSCloud) createTags(resourceID string, tags map[string]string) error { maxAttempts := 60 for { - _, err := s.ec2.CreateTags(request) + _, err := c.ec2.CreateTags(request) if err == nil { return nil } @@ -2040,7 +2073,7 @@ func findTag(tags []*ec2.Tag, key string) (string, bool) { // Finds the subnets associated with the cluster, by matching tags. // For maximal backwards compatibility, if no subnets are tagged, it will fall-back to the current subnet. // However, in future this will likely be treated as an error. -func (c *AWSCloud) findSubnets() ([]*ec2.Subnet, error) { +func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) { request := &ec2.DescribeSubnetsInput{} vpcIDFilter := newEc2Filter("vpc-id", c.vpcID) filters := []*ec2.Filter{vpcIDFilter} @@ -2074,17 +2107,17 @@ func (c *AWSCloud) findSubnets() ([]*ec2.Subnet, error) { // Finds the subnets to use for an ELB we are creating. // Normal (Internet-facing) ELBs must use public subnets, so we skip private subnets. // Internal ELBs can use public or private subnets, but if we have a private subnet we should prefer that. 
-func (s *AWSCloud) findELBSubnets(internalELB bool) ([]string, error) { - vpcIDFilter := newEc2Filter("vpc-id", s.vpcID) +func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { + vpcIDFilter := newEc2Filter("vpc-id", c.vpcID) - subnets, err := s.findSubnets() + subnets, err := c.findSubnets() if err != nil { return nil, err } rRequest := &ec2.DescribeRouteTablesInput{} rRequest.Filters = []*ec2.Filter{vpcIDFilter} - rt, err := s.ec2.DescribeRouteTables(rRequest) + rt, err := c.ec2.DescribeRouteTables(rRequest) if err != nil { return nil, fmt.Errorf("error describe route table: %v", err) } @@ -2249,10 +2282,10 @@ func buildListener(port api.ServicePort, annotations map[string]string, sslPorts } // EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer -func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) (*api.LoadBalancerStatus, error) { +func (c *Cloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) (*api.LoadBalancerStatus, error) { annotations := apiService.Annotations glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", - apiService.Namespace, apiService.Name, s.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, annotations) + apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, annotations) if apiService.Spec.SessionAffinity != api.ServiceAffinityNone { // ELB supports sticky sessions, but only when configured for HTTP/HTTPS @@ -2286,7 +2319,7 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( } hostSet := sets.NewString(hosts...) 
- instances, err := s.getInstancesByNodeNamesCached(hostSet) + instances, err := c.getInstancesByNodeNamesCached(hostSet) if err != nil { return nil, err } @@ -2321,7 +2354,7 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( } // Find the subnets that the ELB will live in - subnetIDs, err := s.findELBSubnets(internalELB) + subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { glog.Error("Error listing subnets in VPC: ", err) return nil, err @@ -2340,7 +2373,7 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( { sgName := "k8s-elb-" + loadBalancerName sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName) - securityGroupID, err = s.ensureSecurityGroup(sgName, sgDescription) + securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription) if err != nil { glog.Error("Error creating load balancer security group: ", err) return nil, err @@ -2377,7 +2410,7 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( permissions.Insert(permission) } - _, err = s.setSecurityGroupIngress(securityGroupID, permissions) + _, err = c.setSecurityGroupIngress(securityGroupID, permissions) if err != nil { return nil, err } @@ -2385,7 +2418,7 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( securityGroupIDs := []string{securityGroupID} // Build the load balancer itself - loadBalancer, err := s.ensureLoadBalancer( + loadBalancer, err := c.ensureLoadBalancer( serviceName, loadBalancerName, listeners, @@ -2398,18 +2431,18 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( return nil, err } - err = s.ensureLoadBalancerHealthCheck(loadBalancer, listeners) + err = c.ensureLoadBalancerHealthCheck(loadBalancer, listeners) if err != nil { return nil, err } - err = s.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) + err = 
c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) if err != nil { glog.Warningf("Error opening ingress rules for the load balancer to the instances: %v", err) return nil, err } - err = s.ensureLoadBalancerInstances(orEmpty(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) + err = c.ensureLoadBalancerInstances(orEmpty(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) if err != nil { glog.Warningf("Error registering instances with the load balancer: %v", err) return nil, err @@ -2424,9 +2457,9 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) ( } // GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer -func (s *AWSCloud) GetLoadBalancer(service *api.Service) (*api.LoadBalancerStatus, bool, error) { +func (c *Cloud) GetLoadBalancer(service *api.Service) (*api.LoadBalancerStatus, bool, error) { loadBalancerName := cloudprovider.GetLoadBalancerName(service) - lb, err := s.describeLoadBalancer(loadBalancerName) + lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return nil, false, err } @@ -2496,10 +2529,10 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m } // Return all the security groups that are tagged as being part of our cluster -func (s *AWSCloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) { +func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) { request := &ec2.DescribeSecurityGroupsInput{} - request.Filters = s.addFilters(nil) - groups, err := s.ec2.DescribeSecurityGroups(request) + request.Filters = c.addFilters(nil) + groups, err := c.ec2.DescribeSecurityGroups(request) if err != nil { return nil, fmt.Errorf("error querying security groups: %v", err) } @@ -2518,38 +2551,38 @@ func (s *AWSCloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, err // Open security group ingress rules on the instances so that the load balancer can talk to them // Will 
also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances -func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, allInstances []*ec2.Instance) error { - if s.cfg.Global.DisableSecurityGroupIngress { +func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, allInstances []*ec2.Instance) error { + if c.cfg.Global.DisableSecurityGroupIngress { return nil } // Determine the load balancer security group id - loadBalancerSecurityGroupId := "" + loadBalancerSecurityGroupID := "" for _, securityGroup := range lb.SecurityGroups { if isNilOrEmpty(securityGroup) { continue } - if loadBalancerSecurityGroupId != "" { + if loadBalancerSecurityGroupID != "" { // We create LBs with one SG glog.Warningf("Multiple security groups for load balancer: %q", orEmpty(lb.LoadBalancerName)) } - loadBalancerSecurityGroupId = *securityGroup + loadBalancerSecurityGroupID = *securityGroup } - if loadBalancerSecurityGroupId == "" { + if loadBalancerSecurityGroupID == "" { return fmt.Errorf("Could not determine security group for load balancer: %s", orEmpty(lb.LoadBalancerName)) } // Get the actual list of groups that allow ingress from the load-balancer describeRequest := &ec2.DescribeSecurityGroupsInput{} filters := []*ec2.Filter{} - filters = append(filters, newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupId)) - describeRequest.Filters = s.addFilters(filters) - actualGroups, err := s.ec2.DescribeSecurityGroups(describeRequest) + filters = append(filters, newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupID)) + describeRequest.Filters = c.addFilters(filters) + actualGroups, err := c.ec2.DescribeSecurityGroups(describeRequest) if err != nil { return fmt.Errorf("error querying security groups for ELB: %v", err) } - taggedSecurityGroups, err := s.getTaggedSecurityGroups() + taggedSecurityGroups, err := c.getTaggedSecurityGroups() if err != nil 
{ return fmt.Errorf("error querying for tagged security groups: %v", err) } @@ -2600,38 +2633,38 @@ func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalan } } - for instanceSecurityGroupId, add := range instanceSecurityGroupIds { + for instanceSecurityGroupID, add := range instanceSecurityGroupIds { if add { - glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupId, instanceSecurityGroupId) + glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } else { - glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupId, instanceSecurityGroupId) + glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } - sourceGroupId := &ec2.UserIdGroupPair{} - sourceGroupId.GroupId = &loadBalancerSecurityGroupId + sourceGroupID := &ec2.UserIdGroupPair{} + sourceGroupID.GroupId = &loadBalancerSecurityGroupID allProtocols := "-1" permission := &ec2.IpPermission{} permission.IpProtocol = &allProtocols - permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupId} + permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupID} permissions := []*ec2.IpPermission{permission} if add { - changed, err := s.addSecurityGroupIngress(instanceSecurityGroupId, permissions) + changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, permissions) if err != nil { return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupId) + glog.Warning("Allowing ingress was not needed; concurrent change? 
groupId=", instanceSecurityGroupID) } } else { - changed, err := s.removeSecurityGroupIngress(instanceSecurityGroupId, permissions) + changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions) if err != nil { return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupId) + glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } } @@ -2640,9 +2673,9 @@ func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalan } // EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted. -func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error { +func (c *Cloud) EnsureLoadBalancerDeleted(service *api.Service) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) - lb, err := s.describeLoadBalancer(loadBalancerName) + lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return err } @@ -2654,7 +2687,7 @@ func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error { { // De-authorize the load balancer security group from the instances security group - err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) + err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) if err != nil { glog.Error("Error deregistering load balancer from instance security groups: ", err) return err @@ -2666,7 +2699,7 @@ func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error { request := &elb.DeleteLoadBalancerInput{} request.LoadBalancerName = lb.LoadBalancerName - _, err = s.elb.DeleteLoadBalancer(request) + _, err = c.elb.DeleteLoadBalancer(request) if err != nil { // TODO: Check if error was because load balancer was concurrently deleted glog.Error("Error deleting load balancer: ", err) @@ -2695,7 +2728,7 @@ func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error { for securityGroupID := range securityGroupIDs { request 
:= &ec2.DeleteSecurityGroupInput{} request.GroupId = &securityGroupID - _, err := s.ec2.DeleteSecurityGroup(request) + _, err := c.ec2.DeleteSecurityGroup(request) if err == nil { delete(securityGroupIDs, securityGroupID) } else { @@ -2736,15 +2769,15 @@ func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error { } // UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer -func (s *AWSCloud) UpdateLoadBalancer(service *api.Service, hosts []string) error { +func (c *Cloud) UpdateLoadBalancer(service *api.Service, hosts []string) error { hostSet := sets.NewString(hosts...) - instances, err := s.getInstancesByNodeNamesCached(hostSet) + instances, err := c.getInstancesByNodeNamesCached(hostSet) if err != nil { return err } loadBalancerName := cloudprovider.GetLoadBalancerName(service) - lb, err := s.describeLoadBalancer(loadBalancerName) + lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return err } @@ -2753,12 +2786,12 @@ func (s *AWSCloud) UpdateLoadBalancer(service *api.Service, hosts []string) erro return fmt.Errorf("Load balancer not found") } - err = s.ensureLoadBalancerInstances(orEmpty(lb.LoadBalancerName), lb.Instances, instances) + err = c.ensureLoadBalancerInstances(orEmpty(lb.LoadBalancerName), lb.Instances, instances) if err != nil { return nil } - err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, instances) + err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, instances) if err != nil { return err } @@ -2767,8 +2800,8 @@ func (s *AWSCloud) UpdateLoadBalancer(service *api.Service, hosts []string) erro } // Returns the instance with the specified ID -func (a *AWSCloud) getInstanceByID(instanceID string) (*ec2.Instance, error) { - instances, err := a.getInstancesByIDs([]*string{&instanceID}) +func (c *Cloud) getInstanceByID(instanceID string) (*ec2.Instance, error) { + instances, err := c.getInstancesByIDs([]*string{&instanceID}) if err != nil { return nil, err } @@ -2783,7 +2816,7 @@ func (a *AWSCloud) 
getInstanceByID(instanceID string) (*ec2.Instance, error) { return instances[instanceID], nil } -func (a *AWSCloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Instance, error) { +func (c *Cloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Instance, error) { instancesByID := make(map[string]*ec2.Instance) if len(instanceIDs) == 0 { return instancesByID, nil @@ -2793,7 +2826,7 @@ func (a *AWSCloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Ins InstanceIds: instanceIDs, } - instances, err := a.ec2.DescribeInstances(request) + instances, err := c.ec2.DescribeInstances(request) if err != nil { return nil, err } @@ -2813,15 +2846,15 @@ func (a *AWSCloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Ins // Fetches and caches instances by node names; returns an error if any cannot be found. // This is implemented with a multi value filter on the node names, fetching the desired instances with a single query. // TODO(therc): make all the caching more rational during the 1.4 timeframe -func (a *AWSCloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2.Instance, error) { - a.mutex.Lock() - defer a.mutex.Unlock() - if nodeNames.Equal(a.lastNodeNames) { - if len(a.lastInstancesByNodeNames) > 0 { +func (c *Cloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2.Instance, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if nodeNames.Equal(c.lastNodeNames) { + if len(c.lastInstancesByNodeNames) > 0 { // We assume that if the list of nodes is the same, the underlying // instances have not changed. Later we might guard this with TTLs. glog.V(2).Infof("Returning cached instances for %v", nodeNames) - return a.lastInstancesByNodeNames, nil + return c.lastInstancesByNodeNames, nil } } names := aws.StringSlice(nodeNames.List()) @@ -2836,12 +2869,12 @@ func (a *AWSCloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2. 
newEc2Filter("instance-state-name", "running"), } - filters = a.addFilters(filters) + filters = c.addFilters(filters) request := &ec2.DescribeInstancesInput{ Filters: filters, } - instances, err := a.ec2.DescribeInstances(request) + instances, err := c.ec2.DescribeInstances(request) if err != nil { glog.V(2).Infof("Failed to describe instances %v", nodeNames) return nil, err @@ -2853,24 +2886,24 @@ func (a *AWSCloud) getInstancesByNodeNamesCached(nodeNames sets.String) ([]*ec2. } glog.V(2).Infof("Caching instances for %v", nodeNames) - a.lastNodeNames = nodeNames - a.lastInstancesByNodeNames = instances + c.lastNodeNames = nodeNames + c.lastInstancesByNodeNames = instances return instances, nil } // Returns the instance with the specified node name // Returns nil if it does not exist -func (a *AWSCloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) { +func (c *Cloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) { filters := []*ec2.Filter{ newEc2Filter("private-dns-name", nodeName), newEc2Filter("instance-state-name", "running"), } - filters = a.addFilters(filters) + filters = c.addFilters(filters) request := &ec2.DescribeInstancesInput{ Filters: filters, } - instances, err := a.ec2.DescribeInstances(request) + instances, err := c.ec2.DescribeInstances(request) if err != nil { return nil, err } @@ -2885,18 +2918,18 @@ func (a *AWSCloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error // Returns the instance with the specified node name // Like findInstanceByNodeName, but returns error if node not found -func (a *AWSCloud) getInstanceByNodeName(nodeName string) (*ec2.Instance, error) { - instance, err := a.findInstanceByNodeName(nodeName) +func (c *Cloud) getInstanceByNodeName(nodeName string) (*ec2.Instance, error) { + instance, err := c.findInstanceByNodeName(nodeName) if err == nil && instance == nil { - return nil, fmt.Errorf("no instances found for name: %s", nodeName) + return nil, cloudprovider.InstanceNotFound 
} return instance, err } // Add additional filters, to match on our tags // This lets us run multiple k8s clusters in a single EC2 AZ -func (s *AWSCloud) addFilters(filters []*ec2.Filter) []*ec2.Filter { - for k, v := range s.filterTags { +func (c *Cloud) addFilters(filters []*ec2.Filter) []*ec2.Filter { + for k, v := range c.filterTags { filters = append(filters, newEc2Filter("tag:"+k, v)) } if len(filters) == 0 { @@ -2909,6 +2942,6 @@ func (s *AWSCloud) addFilters(filters []*ec2.Filter) []*ec2.Filter { } // Returns the cluster name or an empty string -func (s *AWSCloud) getClusterName() string { - return s.filterTags[TagNameKubernetesCluster] +func (c *Cloud) getClusterName() string { + return c.filterTags[TagNameKubernetesCluster] } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go index 563c90de14..5edbb3da99 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go @@ -25,7 +25,7 @@ import ( ) // AWSCloud implements InstanceGroups -var _ InstanceGroups = &AWSCloud{} +var _ InstanceGroups = &Cloud{} // ResizeInstanceGroup sets the size of the specificed instancegroup Exported // so it can be used by the e2e tests, which don't want to instantiate a full @@ -44,8 +44,8 @@ func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error { // Implement InstanceGroups.ResizeInstanceGroup // Set the size to the fixed size -func (a *AWSCloud) ResizeInstanceGroup(instanceGroupName string, size int) error { - return ResizeInstanceGroup(a.asg, instanceGroupName, size) +func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error { + return ResizeInstanceGroup(c.asg, instanceGroupName, size) } // DescribeInstanceGroup gets info about the specified instancegroup @@ -72,8 +72,8 @@ func 
DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo // Implement InstanceGroups.DescribeInstanceGroup // Queries the cloud provider for information about the specified instance group -func (a *AWSCloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) { - return DescribeInstanceGroup(a.asg, instanceGroupName) +func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) { + return DescribeInstanceGroup(c.asg, instanceGroupName) } // awsInstanceGroup implements InstanceGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 727675d8f0..85b2f2d50c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -30,8 +30,8 @@ import ( const ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled" -func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool) (*elb.LoadBalancerDescription, error) { - loadBalancer, err := s.describeLoadBalancer(loadBalancerName) +func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool) (*elb.LoadBalancerDescription, error) { + loadBalancer, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return nil, err } @@ -55,25 +55,25 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB createRequest.SecurityGroups = stringPointerArray(securityGroupIDs) createRequest.Tags = []*elb.Tag{ - {Key: aws.String(TagNameKubernetesCluster), Value: aws.String(s.getClusterName())}, + {Key: 
aws.String(TagNameKubernetesCluster), Value: aws.String(c.getClusterName())}, {Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())}, } glog.Infof("Creating load balancer for %v with name: ", namespacedName, loadBalancerName) - _, err := s.elb.CreateLoadBalancer(createRequest) + _, err := c.elb.CreateLoadBalancer(createRequest) if err != nil { return nil, err } if proxyProtocol { - err = s.createProxyProtocolPolicy(loadBalancerName) + err = c.createProxyProtocolPolicy(loadBalancerName) if err != nil { return nil, err } for _, listener := range listeners { glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort) - err := s.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)}) + err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)}) if err != nil { return nil, err } @@ -97,7 +97,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(removals) glog.V(2).Info("Detaching load balancer from removed subnets") - _, err := s.elb.DetachLoadBalancerFromSubnets(request) + _, err := c.elb.DetachLoadBalancerFromSubnets(request) if err != nil { return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err) } @@ -109,7 +109,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(additions) glog.V(2).Info("Attaching load balancer to added subnets") - _, err := s.elb.AttachLoadBalancerToSubnets(request) + _, err := c.elb.AttachLoadBalancerToSubnets(request) if err != nil { return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err) } @@ -128,7 +128,7 @@ func (s *AWSCloud) 
ensureLoadBalancer(namespacedName types.NamespacedName, loadB request.LoadBalancerName = aws.String(loadBalancerName) request.SecurityGroups = stringPointerArray(securityGroupIDs) glog.V(2).Info("Applying updated security groups to load balancer") - _, err := s.elb.ApplySecurityGroupsToLoadBalancer(request) + _, err := c.elb.ApplySecurityGroupsToLoadBalancer(request) if err != nil { return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err) } @@ -188,7 +188,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB request.LoadBalancerName = aws.String(loadBalancerName) request.LoadBalancerPorts = removals glog.V(2).Info("Deleting removed load balancer listeners") - _, err := s.elb.DeleteLoadBalancerListeners(request) + _, err := c.elb.DeleteLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err) } @@ -200,7 +200,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB request.LoadBalancerName = aws.String(loadBalancerName) request.Listeners = additions glog.V(2).Info("Creating added load balancer listeners") - _, err := s.elb.CreateLoadBalancerListeners(request) + _, err := c.elb.CreateLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err) } @@ -219,7 +219,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB // back if a policy of the same name already exists. However, the aws-sdk does not // seem to return an error to us in these cases. Therefore this will issue an API // request every time. 
- err := s.createProxyProtocolPolicy(loadBalancerName) + err := c.createProxyProtocolPolicy(loadBalancerName) if err != nil { return nil, err } @@ -252,7 +252,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB if setPolicy { glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol) - err := s.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies) + err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies) if err != nil { return nil, err } @@ -266,7 +266,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB for instancePort, found := range foundBackends { if !found { glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) - err := s.setBackendPolicies(loadBalancerName, instancePort, []*string{}) + err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{}) if err != nil { return nil, err } @@ -277,7 +277,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB } if dirty { - loadBalancer, err = s.describeLoadBalancer(loadBalancerName) + loadBalancer, err = c.describeLoadBalancer(loadBalancerName) if err != nil { glog.Warning("Unable to retrieve load balancer after creation/update") return nil, err @@ -288,7 +288,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB } // Makes sure that the health check for an ELB matches the configured listeners -func (s *AWSCloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, listeners []*elb.Listener) error { +func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, listeners []*elb.Listener) error { actual := loadBalancer.HealthCheck // Default AWS settings @@ -332,7 +332,7 @@ func (s *AWSCloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerD request.HealthCheck = healthCheck 
request.LoadBalancerName = loadBalancer.LoadBalancerName - _, err := s.elb.ConfigureHealthCheck(request) + _, err := c.elb.ConfigureHealthCheck(request) if err != nil { return fmt.Errorf("error configuring load-balancer health-check: %v", err) } @@ -341,7 +341,7 @@ func (s *AWSCloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerD } // Makes sure that exactly the specified hosts are registered as instances with the load balancer -func (s *AWSCloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error { +func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error { expected := sets.NewString() for _, instance := range instances { expected.Insert(orEmpty(instance.InstanceId)) @@ -373,7 +373,7 @@ func (s *AWSCloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstan registerRequest := &elb.RegisterInstancesWithLoadBalancerInput{} registerRequest.Instances = addInstances registerRequest.LoadBalancerName = aws.String(loadBalancerName) - _, err := s.elb.RegisterInstancesWithLoadBalancer(registerRequest) + _, err := c.elb.RegisterInstancesWithLoadBalancer(registerRequest) if err != nil { return err } @@ -384,7 +384,7 @@ func (s *AWSCloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstan deregisterRequest := &elb.DeregisterInstancesFromLoadBalancerInput{} deregisterRequest.Instances = removeInstances deregisterRequest.LoadBalancerName = aws.String(loadBalancerName) - _, err := s.elb.DeregisterInstancesFromLoadBalancer(deregisterRequest) + _, err := c.elb.DeregisterInstancesFromLoadBalancer(deregisterRequest) if err != nil { return err } @@ -394,7 +394,7 @@ func (s *AWSCloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstan return nil } -func (s *AWSCloud) createProxyProtocolPolicy(loadBalancerName string) error { +func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error { 
request := &elb.CreateLoadBalancerPolicyInput{ LoadBalancerName: aws.String(loadBalancerName), PolicyName: aws.String(ProxyProtocolPolicyName), @@ -407,7 +407,7 @@ func (s *AWSCloud) createProxyProtocolPolicy(loadBalancerName string) error { }, } glog.V(2).Info("Creating proxy protocol policy on load balancer") - _, err := s.elb.CreateLoadBalancerPolicy(request) + _, err := c.elb.CreateLoadBalancerPolicy(request) if err != nil { return fmt.Errorf("error creating proxy protocol policy on load balancer: %v", err) } @@ -415,7 +415,7 @@ func (s *AWSCloud) createProxyProtocolPolicy(loadBalancerName string) error { return nil } -func (s *AWSCloud) setBackendPolicies(loadBalancerName string, instancePort int64, policies []*string) error { +func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, policies []*string) error { request := &elb.SetLoadBalancerPoliciesForBackendServerInput{ InstancePort: aws.Int64(instancePort), LoadBalancerName: aws.String(loadBalancerName), @@ -426,7 +426,7 @@ func (s *AWSCloud) setBackendPolicies(loadBalancerName string, instancePort int6 } else { glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) } - _, err := s.elb.SetLoadBalancerPoliciesForBackendServer(request) + _, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request) if err != nil { return fmt.Errorf("error adjusting AWS loadbalancer backend policies: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go index a469e5f70b..63a9e2e5ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go @@ -25,14 +25,14 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" ) -func (s *AWSCloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) { +func (c *Cloud) findRouteTable(clusterName string) 
(*ec2.RouteTable, error) { // This should be unnecessary (we already filter on TagNameKubernetesCluster, // and something is broken if cluster name doesn't match, but anyway... // TODO: All clouds should be cluster-aware by default filters := []*ec2.Filter{newEc2Filter("tag:"+TagNameKubernetesCluster, clusterName)} - request := &ec2.DescribeRouteTablesInput{Filters: s.addFilters(filters)} + request := &ec2.DescribeRouteTablesInput{Filters: c.addFilters(filters)} - tables, err := s.ec2.DescribeRouteTables(request) + tables, err := c.ec2.DescribeRouteTables(request) if err != nil { return nil, err } @@ -49,8 +49,8 @@ func (s *AWSCloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) { // ListRoutes implements Routes.ListRoutes // List all routes that match the filter -func (s *AWSCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) { - table, err := s.findRouteTable(clusterName) +func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) { + table, err := c.findRouteTable(clusterName) if err != nil { return nil, err } @@ -68,7 +68,7 @@ func (s *AWSCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error instanceIDs = append(instanceIDs, &instanceID) } - instances, err := s.getInstancesByIDs(instanceIDs) + instances, err := c.getInstancesByIDs(instanceIDs) if err != nil { return nil, err } @@ -95,12 +95,12 @@ func (s *AWSCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error } // Sets the instance attribute "source-dest-check" to the specified value -func (s *AWSCloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCheck bool) error { +func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCheck bool) error { request := &ec2.ModifyInstanceAttributeInput{} request.InstanceId = aws.String(instanceID) request.SourceDestCheck = &ec2.AttributeBooleanValue{Value: aws.Bool(sourceDestCheck)} - _, err := s.ec2.ModifyInstanceAttribute(request) + _, err := 
c.ec2.ModifyInstanceAttribute(request) if err != nil { return fmt.Errorf("error configuring source-dest-check on instance %s: %v", instanceID, err) } @@ -109,20 +109,20 @@ func (s *AWSCloud) configureInstanceSourceDestCheck(instanceID string, sourceDes // CreateRoute implements Routes.CreateRoute // Create the described route -func (s *AWSCloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error { - instance, err := s.getInstanceByNodeName(route.TargetInstance) +func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error { + instance, err := c.getInstanceByNodeName(route.TargetInstance) if err != nil { return err } // In addition to configuring the route itself, we also need to configure the instance to accept that traffic // On AWS, this requires turning source-dest checks off - err = s.configureInstanceSourceDestCheck(orEmpty(instance.InstanceId), false) + err = c.configureInstanceSourceDestCheck(orEmpty(instance.InstanceId), false) if err != nil { return err } - table, err := s.findRouteTable(clusterName) + table, err := c.findRouteTable(clusterName) if err != nil { return err } @@ -147,7 +147,7 @@ func (s *AWSCloud) CreateRoute(clusterName string, nameHint string, route *cloud request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock request.RouteTableId = table.RouteTableId - _, err = s.ec2.DeleteRoute(request) + _, err = c.ec2.DeleteRoute(request) if err != nil { return fmt.Errorf("error deleting blackholed AWS route (%s): %v", aws.StringValue(deleteRoute.DestinationCidrBlock), err) } @@ -159,7 +159,7 @@ func (s *AWSCloud) CreateRoute(clusterName string, nameHint string, route *cloud request.InstanceId = instance.InstanceId request.RouteTableId = table.RouteTableId - _, err = s.ec2.CreateRoute(request) + _, err = c.ec2.CreateRoute(request) if err != nil { return fmt.Errorf("error creating AWS route (%s): %v", route.DestinationCIDR, err) } @@ -169,8 +169,8 @@ func (s *AWSCloud) 
CreateRoute(clusterName string, nameHint string, route *cloud // DeleteRoute implements Routes.DeleteRoute // Delete the specified route -func (s *AWSCloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error { - table, err := s.findRouteTable(clusterName) +func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error { + table, err := c.findRouteTable(clusterName) if err != nil { return err } @@ -179,7 +179,7 @@ func (s *AWSCloud) DeleteRoute(clusterName string, route *cloudprovider.Route) e request.DestinationCidrBlock = aws.String(route.DestinationCIDR) request.RouteTableId = table.RouteTableId - _, err = s.ec2.DeleteRoute(request) + _, err = c.ec2.DeleteRoute(request) if err != nil { return fmt.Errorf("error deleting AWS route (%s): %v", route.DestinationCIDR, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index 14105d5923..015929b678 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -2363,6 +2363,15 @@ func (gce *GCECloud) AttachDisk(diskName, instanceID string, readOnly bool) erro func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error { inst, err := gce.getInstanceByName(instanceID) if err != nil { + if err == cloudprovider.InstanceNotFound { + // If instance no longer exists, safe to assume volume is not attached. + glog.Warningf( + "Instance %q does not exist. 
DetachDisk will assume PD %q is not attached to it.", + instanceID, + devicePath) + return nil + } + return fmt.Errorf("error getting instance %q", instanceID) } @@ -2377,6 +2386,15 @@ func (gce *GCECloud) DetachDisk(devicePath, instanceID string) error { func (gce *GCECloud) DiskIsAttached(diskName, instanceID string) (bool, error) { instance, err := gce.getInstanceByName(instanceID) if err != nil { + if err == cloudprovider.InstanceNotFound { + // If instance no longer exists, safe to assume volume is not attached. + glog.Warningf( + "Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.", + instanceID, + diskName) + return false, nil + } + return false, err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go b/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go index 95170aeb7c..9a1b900afa 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go @@ -20,10 +20,13 @@ import ( "errors" "fmt" "net" + "sync" "k8s.io/kubernetes/pkg/api" + apierrors "k8s.io/kubernetes/pkg/api/errors" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "github.com/golang/glog" @@ -60,6 +63,9 @@ type rangeAllocator struct { // This increases a throughput of CIDR assignment by not blocking on long operations. 
nodeCIDRUpdateChannel chan nodeAndCIDR recorder record.EventRecorder + // Keep a set of nodes that are currectly being processed to avoid races in CIDR allocation + sync.Mutex + nodesInProcessing sets.String } // NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDR for node @@ -77,6 +83,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s clusterCIDR: clusterCIDR, nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize), recorder: recorder, + nodesInProcessing: sets.NewString(), } if serviceCIDR != nil { @@ -122,7 +129,24 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s return ra, nil } +func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool { + r.Lock() + defer r.Unlock() + if r.nodesInProcessing.Has(nodeName) { + return false + } + r.nodesInProcessing.Insert(nodeName) + return true +} + +func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) { + r.Lock() + defer r.Unlock() + r.nodesInProcessing.Delete(nodeName) +} + func (r *rangeAllocator) occupyCIDR(node *api.Node) error { + defer r.removeNodeFromProcessing(node.Name) if node.Spec.PodCIDR == "" { return nil } @@ -138,12 +162,22 @@ func (r *rangeAllocator) occupyCIDR(node *api.Node) error { // AllocateOrOccupyCIDR looks at the given node, assigns it a valid CIDR // if it doesn't currently have one or mark the CIDR as used if the node already have one. +// WARNING: If you're adding any return calls or defer any more work from this function +// you have to handle correctly nodesInProcessing. 
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error { + if node == nil { + return nil + } + if !r.insertNodeToProcessing(node.Name) { + glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) + return nil + } if node.Spec.PodCIDR != "" { return r.occupyCIDR(node) } podCIDR, err := r.cidrs.allocateNext() if err != nil { + r.removeNodeFromProcessing(node.Name) recordNodeStatusChange(r.recorder, node, "CIDRNotAvailable") return fmt.Errorf("failed to allocate cidr: %v", err) } @@ -173,8 +207,8 @@ func (r *rangeAllocator) ReleaseCIDR(node *api.Node) error { return err } -// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used, so that they won't be -// assignable. +// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used, +// so that they won't be assignable. func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) { // Checks if service CIDR has a nonempty intersection with cluster CIDR. It is the case if either // clusterCIDR contains serviceCIDR with clusterCIDR's Mask applied (this means that clusterCIDR contains serviceCIDR) @@ -192,6 +226,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) { func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { var err error var node *api.Node + defer r.removeNodeFromProcessing(data.nodeName) for rep := 0; rep < podCIDRUpdateRetry; rep++ { // TODO: change it to using PATCH instead of full Node updates. node, err = r.client.Core().Nodes().Get(data.nodeName) @@ -209,9 +244,14 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { } if err != nil { recordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") - glog.Errorf("CIDR assignment for node %v failed: %v. 
Releasing allocated CIDR", data.nodeName, err) - if releaseErr := r.cidrs.release(data.cidr); releaseErr != nil { - glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr) + // We accept the fact that we may leek CIDRs here. This is safer than releasing + // them in case when we don't know if request went through. + // NodeController restart will return all falsely allocated CIDRs to the pool. + if !apierrors.IsServerTimeout(err) { + glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", data.nodeName, err) + if releaseErr := r.cidrs.release(data.cidr); releaseErr != nil { + glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr) + } } } return err diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_set.go b/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_set.go index 5f013d92f0..02f2a63eb2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_set.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/cidr_set.go @@ -78,26 +78,13 @@ func (s *cidrSet) allocateNext() (*net.IPNet, error) { }, nil } -func (s *cidrSet) release(cidr *net.IPNet) error { - used, err := s.getIndexForCIDR(cidr) - if err != nil { - return err - } - - s.Lock() - defer s.Unlock() - s.used.SetBit(&s.used, used, 0) - - return nil -} - -func (s *cidrSet) occupy(cidr *net.IPNet) (err error) { - begin, end := 0, s.maxCIDRs +func (s *cidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err error) { + begin, end = 0, s.maxCIDRs cidrMask := cidr.Mask maskSize, _ := cidrMask.Size() if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) { - return fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, s.clusterCIDR) + return -1, -1, fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, s.clusterCIDR) } if s.clusterMaskSize < maskSize { @@ -107,7 +94,7 @@ func (s *cidrSet) occupy(cidr *net.IPNet) (err 
error) { Mask: subNetMask, }) if err != nil { - return err + return -1, -1, err } ip := make([]byte, 4) @@ -118,9 +105,30 @@ func (s *cidrSet) occupy(cidr *net.IPNet) (err error) { Mask: subNetMask, }) if err != nil { - return err + return -1, -1, err } } + return begin, end, nil +} + +func (s *cidrSet) release(cidr *net.IPNet) error { + begin, end, err := s.getBeginingAndEndIndices(cidr) + if err != nil { + return err + } + s.Lock() + defer s.Unlock() + for i := begin; i <= end; i++ { + s.used.SetBit(&s.used, i, 0) + } + return nil +} + +func (s *cidrSet) occupy(cidr *net.IPNet) (err error) { + begin, end, err := s.getBeginingAndEndIndices(cidr) + if err != nil { + return err + } s.Lock() defer s.Unlock() diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go index 175c768a59..266e1ef330 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go @@ -232,6 +232,34 @@ func NewNodeController( glog.Errorf("Error allocating CIDR: %v", err) } }, + UpdateFunc: func(_, obj interface{}) { + node := obj.(*api.Node) + // If the PodCIDR is not empty we either: + // - already processed a Node that already had a CIDR after NC restarted + // (cidr is marked as used), + // - already processed a Node successfully and allocated a CIDR for it + // (cidr is marked as used), + // - already processed a Node but we did saw a "timeout" response and + // request eventually got through in this case we haven't released + // the allocated CIDR (cidr is still marked as used). 
+ // There's a possible error here: + // - NC sees a new Node and assigns a CIDR X to it, + // - Update Node call fails with a timeout, + // - Node is updated by some other component, NC sees an update and + // assigns CIDR Y to the Node, + // - Both CIDR X and CIDR Y are marked as used in the local cache, + // even though Node sees only CIDR Y + // The problem here is that in in-memory cache we see CIDR X as marked, + // which prevents it from being assigned to any new node. The cluster + // state is correct. + // Restart of NC fixes the issue. + if node.Spec.PodCIDR == "" { + err := nc.cidrAllocator.AllocateOrOccupyCIDR(node) + if err != nil { + glog.Errorf("Error allocating CIDR: %v", err) + } + } + }, DeleteFunc: func(obj interface{}) { node := obj.(*api.Node) err := nc.cidrAllocator.ReleaseCIDR(node) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/OWNERS new file mode 100644 index 0000000000..73ab6a21c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/OWNERS @@ -0,0 +1,2 @@ +assignees: + - saad-ali diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go index 7997f5e8f6..d6f042ce65 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -16,7 +16,7 @@ limitations under the License. // Package volume implements a controller to manage volume attach and detach // operations. 
-package volume +package attachdetach import ( "fmt" @@ -28,10 +28,10 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/controller/volume/cache" - "k8s.io/kubernetes/pkg/controller/volume/populator" - "k8s.io/kubernetes/pkg/controller/volume/reconciler" - "k8s.io/kubernetes/pkg/controller/volume/statusupdater" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" @@ -54,7 +54,7 @@ const ( // desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the // DesiredStateOfWorldPopulator loop waits between successive executions - desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 5 * time.Minute + desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 1 * time.Minute ) // AttachDetachController defines the operations supported by this controller. 
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index b90981e40d..acdd75f533 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -224,7 +224,7 @@ type nodeToUpdateStatusFor struct { } func (asw *actualStateOfWorld) MarkVolumeAsAttached( - volumeSpec *volume.Spec, nodeName string, devicePath string) error { + _ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName string, devicePath string) error { _, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/populator/desired_state_of_world_populator.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/populator/desired_state_of_world_populator.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go index da9554c211..ab9cbac315 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/populator/desired_state_of_world_populator.go +++ 
b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" kcache "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/controller/volume/cache" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -82,12 +82,26 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { glog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err) continue } - // retrieve the pod object from pod informer with the namespace key - informerPodObj, exists, err := dswp.podInformer.GetStore().GetByKey(dswPodKey) - if err != nil || informerPodObj == nil { - glog.Errorf("podInformer GetByKey failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err) + + // Retrieve the pod object from pod informer with the namespace key + informerPodObj, exists, err := + dswp.podInformer.GetStore().GetByKey(dswPodKey) + if err != nil { + glog.Errorf( + "podInformer GetByKey failed for pod %q (UID %q) with %v", + dswPodKey, + dswPodUID, + err) continue } + if exists && informerPodObj == nil { + glog.Info( + "podInformer GetByKey found pod, but informerPodObj is nil for pod %q (UID %q)", + dswPodKey, + dswPodUID) + continue + } + if exists { informerPod, ok := informerPodObj.(*api.Pod) if !ok { @@ -95,7 +109,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { continue } informerPodUID := volumehelper.GetUniquePodName(informerPod) - // Check whether the unique idenfier of the pod from dsw matches the one retrived from pod informer + // Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer if informerPodUID == dswPodUID { glog.V(10).Infof( "Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID) @@ -103,7 
+117,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { } } - // the pod from dsw does not exist in pod informer, or it does not match the unique idenfier retrieved + // the pod from dsw does not exist in pod informer, or it does not match the unique identifer retrieved // from the informer, delete it from dsw glog.V(1).Infof( "Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go index 914d93250c..e27fa8c326 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go @@ -23,10 +23,11 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/controller/volume/cache" - "k8s.io/kubernetes/pkg/controller/volume/statusupdater" - "k8s.io/kubernetes/pkg/util/goroutinemap" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" ) @@ -114,9 +115,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { glog.Infof("Started DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName) } if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected. 
+ !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. glog.Errorf( "operationExecutor.DetachVolume failed to start for volume %q (spec.Name: %q) from node %q with err: %v", @@ -134,9 +135,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { glog.Infof("Started DetachVolume for volume %q from node %q due to maxWaitForUnmountDuration expiry.", attachedVolume.VolumeName, attachedVolume.NodeName) } if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. glog.Errorf( "operationExecutor.DetachVolume failed to start (maxWaitForUnmountDuration expiry) for volume %q (spec.Name: %q) from node %q with err: %v", @@ -169,9 +170,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { glog.Infof("Started AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName) } if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
glog.Errorf( "operationExecutor.AttachVolume failed to start for volume %q (spec.Name: %q) to node %q with err: %v", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/statusupdater/fake_node_status_updater.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/fake_node_status_updater.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/statusupdater/fake_node_status_updater.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/fake_node_status_updater.go diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/statusupdater/node_status_updater.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/statusupdater/node_status_updater.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index cf9d989b5f..27c3a25b83 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/statusupdater/node_status_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/controller/volume/cache" + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/util/strategicpatch" ) @@ -62,10 +62,12 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { for nodeName, attachedVolumes := range nodesToUpdate { nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(nodeName) if nodeObj == nil || !exists || err != nil { - return fmt.Errorf( - "failed to find node %q in NodeInformer cache. %v", + // If node does not exist, its status cannot be updated, log error and move on. + glog.Warningf( + "Could not update node status. 
Failed to find node %q in NodeInformer cache. %v", nodeName, err) + return nil } node, ok := nodeObj.(*api.Node) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS similarity index 73% rename from vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/OWNERS rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS index b9e1568abd..1cdda19f30 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS @@ -1,3 +1,4 @@ assignees: + - jsafrane - saad-ali - thockin diff --git a/vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/controller.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go diff --git a/vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_base.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_base.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go diff --git a/vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/index.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/index.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go diff --git a/vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/volume_host.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go similarity index 100% rename from 
vendor/k8s.io/kubernetes/pkg/controller/persistentvolume/volume_host.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 4a705e73ed..744216c812 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -71,7 +71,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/kubelet/util/queue" - kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volume" + "k8s.io/kubernetes/pkg/kubelet/volumemanager" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/types" @@ -501,9 +501,9 @@ func NewMainKubelet( return nil, err } - klet.volumeManager, err = kubeletvolume.NewVolumeManager( + klet.volumeManager, err = volumemanager.NewVolumeManager( enableControllerAttachDetach, - hostname, + nodeName, klet.podManager, klet.kubeClient, klet.volumePluginMgr, @@ -687,7 +687,7 @@ type Kubelet struct { // VolumeManager runs a set of asynchronous loops that figure out which // volumes need to be attached/mounted/unmounted/detached based on the pods // scheduled on this node and makes it so. - volumeManager kubeletvolume.VolumeManager + volumeManager volumemanager.VolumeManager // Cloud provider interface. cloud cloudprovider.Interface @@ -977,7 +977,9 @@ func (kl *Kubelet) initializeModules() error { // initializeRuntimeDependentModules will initialize internal modules that require the container runtime to be up. func (kl *Kubelet) initializeRuntimeDependentModules() { if err := kl.cadvisor.Start(); err != nil { - kl.runtimeState.setInternalError(fmt.Errorf("Failed to start cAdvisor %v", err)) + // Fail kubelet and rely on the babysitter to retry starting kubelet. 
+ // TODO(random-liu): Add backoff logic in the babysitter + glog.Fatalf("Failed to start cAdvisor %v", err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go b/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go index 1880efdc2d..a39272e8ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go @@ -1631,13 +1631,6 @@ func (r *Runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePerio r.containerRefManager.ClearRef(c.ID) } - // Touch the systemd service file to update the mod time so it will - // not be garbage collected too soon. - if err := r.os.Chtimes(serviceFile, time.Now(), time.Now()); err != nil { - glog.Errorf("rkt: Failed to change the modification time of the service file %q: %v", serviceName, err) - return err - } - // Since all service file have 'KillMode=mixed', the processes in // the unit's cgroup will receive a SIGKILL if the normal stop timeouts. reschan := make(chan string) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS new file mode 100644 index 0000000000..73ab6a21c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS @@ -0,0 +1,2 @@ +assignees: + - saad-ali diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubelet/volume/cache/actual_state_of_world.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 4dd5f5444f..8c23e59e41 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -51,14 +51,6 @@ type ActualStateOfWorld interface { // operationexecutor to interact with it. 
operationexecutor.ActualStateOfWorldAttacherUpdater - // AddVolume adds the given volume to the cache indicating the specified - // volume is attached to this node. A unique volume name is generated from - // the volumeSpec and returned on success. - // If a volume with the same generated name already exists, this is a noop. - // If no volume plugin can support the given volumeSpec or more than one - // plugin can support it, an error is returned. - AddVolume(volumeSpec *volume.Spec, devicePath string) (api.UniqueVolumeName, error) - // AddPodToVolume adds the given pod to the given volume in the cache // indicating the specified volume has been successfully mounted to the // specified pod. @@ -274,9 +266,8 @@ type mountedPod struct { } func (asw *actualStateOfWorld) MarkVolumeAsAttached( - volumeSpec *volume.Spec, nodeName string, devicePath string) error { - _, err := asw.AddVolume(volumeSpec, devicePath) - return err + volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, _, devicePath string) error { + return asw.addVolume(volumeName, volumeSpec, devicePath) } func (asw *actualStateOfWorld) MarkVolumeAsDetached( @@ -315,27 +306,34 @@ func (asw *actualStateOfWorld) MarkDeviceAsUnmounted( return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */) } -func (asw *actualStateOfWorld) AddVolume( - volumeSpec *volume.Spec, devicePath string) (api.UniqueVolumeName, error) { +// addVolume adds the given volume to the cache indicating the specified +// volume is attached to this node. If no volume name is supplied, a unique +// volume name is generated from the volumeSpec and returned on success. If a +// volume with the same generated name already exists, this is a noop. If no +// volume plugin can support the given volumeSpec or more than one plugin can +// support it, an error is returned. 
+func (asw *actualStateOfWorld) addVolume( + volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, devicePath string) error { asw.Lock() defer asw.Unlock() volumePlugin, err := asw.volumePluginMgr.FindPluginBySpec(volumeSpec) if err != nil || volumePlugin == nil { - return "", fmt.Errorf( + return fmt.Errorf( "failed to get Plugin from volumeSpec for volume %q err=%v", volumeSpec.Name(), err) } - volumeName, err := - volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) - if err != nil { - return "", fmt.Errorf( - "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v", - volumeSpec.Name(), - volumePlugin.GetPluginName(), - err) + if len(volumeName) == 0 { + volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) + if err != nil { + return fmt.Errorf( + "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v", + volumeSpec.Name(), + volumePlugin.GetPluginName(), + err) + } } pluginIsAttachable := false @@ -357,7 +355,7 @@ func (asw *actualStateOfWorld) AddVolume( asw.attachedVolumes[volumeName] = volumeObj } - return volumeObj.volumeName, nil + return nil } func (asw *actualStateOfWorld) AddPodToVolume( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/cache/desired_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubelet/volume/cache/desired_state_of_world.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index 673897d8e7..51cc7900c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/cache/desired_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -183,14 +183,27 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( err) } - volumeName, err := - volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) - if err != nil { - 
return "", fmt.Errorf( - "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v", - volumeSpec.Name(), - volumePlugin.GetPluginName(), - err) + var volumeName api.UniqueVolumeName + + // The unique volume name used depends on whether the volume is attachable + // or not. + attachable := dsw.isAttachableVolume(volumeSpec) + if attachable { + // For attachable volumes, use the unique volume name as reported by + // the plugin. + volumeName, err = + volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) + if err != nil { + return "", fmt.Errorf( + "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v", + volumeSpec.Name(), + volumePlugin.GetPluginName(), + err) + } + } else { + // For non-attachable volumes, generate a unique name based on the pod + // namespace and name and the name of the volume within the pod. + volumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, outerVolumeSpecName) } volumeObj, volumeExists := dsw.volumesToMount[volumeName] @@ -198,7 +211,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( volumeObj = volumeToMount{ volumeName: volumeName, podsToMount: make(map[types.UniquePodName]podToMount), - pluginIsAttachable: dsw.isAttachableVolume(volumeSpec), + pluginIsAttachable: attachable, volumeGidValue: volumeGidValue, reportedInUse: false, } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/populator/desired_state_of_world_populator.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubelet/volume/populator/desired_state_of_world_populator.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index eecd802206..0991c66a81 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/populator/desired_state_of_world_populator.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -32,7 +32,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/kubelet/volume/cache" + "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/volume" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/kubelet/volume/reconciler/reconciler.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go index 2f27bb9cab..5063379acd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -24,9 +24,10 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubelet/volume/cache" - "k8s.io/kubernetes/pkg/util/goroutinemap" + "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" ) @@ -117,9 +118,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { err := rc.operationExecutor.UnmountVolume( mountedVolume.MountedVolume, rc.actualStateOfWorld) if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists and goroutinemap.IsExponentialBackoff errors, they are expected. 
+ !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. glog.Errorf( "operationExecutor.UnmountVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v", @@ -158,9 +159,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { rc.hostName, rc.actualStateOfWorld) if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists and goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. glog.Errorf( "operationExecutor.VerifyControllerAttachedVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v", @@ -193,9 +194,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { volumeToMount.Pod.UID) err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld) if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists and goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
glog.Errorf( "operationExecutor.AttachVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v", @@ -231,9 +232,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { volumeToMount.VolumeToMount, rc.actualStateOfWorld) if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists and goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. glog.Errorf( "operationExecutor.MountVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v", @@ -266,9 +267,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { err := rc.operationExecutor.UnmountDevice( attachedVolume.AttachedVolume, rc.actualStateOfWorld) if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists and goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
glog.Errorf( "operationExecutor.UnmountDevice failed for volume %q (spec.Name: %q) controllerAttachDetachEnabled: %v with err: %v", @@ -297,9 +298,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() { err := rc.operationExecutor.DetachVolume( attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld) if err != nil && - !goroutinemap.IsAlreadyExists(err) && - !goroutinemap.IsExponentialBackoff(err) { - // Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected. + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. glog.Errorf( "operationExecutor.DetachVolume failed for volume %q (spec.Name: %q) controllerAttachDetachEnabled: %v with err: %v", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/volume_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubelet/volume/volume_manager.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go index 4976ccb9e2..5a7c4c650d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volume/volume_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go @@ -28,9 +28,9 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/kubelet/volume/cache" - "k8s.io/kubernetes/pkg/kubelet/volume/populator" - "k8s.io/kubernetes/pkg/kubelet/volume/reconciler" + "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" + "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator" + "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler" "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" 
"k8s.io/kubernetes/pkg/util/wait" diff --git a/vendor/k8s.io/kubernetes/pkg/master/master.go b/vendor/k8s.io/kubernetes/pkg/master/master.go index a6c4b32366..5682146229 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/master.go +++ b/vendor/k8s.io/kubernetes/pkg/master/master.go @@ -1061,6 +1061,7 @@ func DefaultAPIResourceConfigSource() *genericapiserver.ResourceConfig { extensionsapiv1beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("ingresses"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("jobs"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("networkpolicies"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("replicasets"), extensionsapiv1beta1.SchemeGroupVersion.WithResource("thirdpartyresources"), ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/OWNERS b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/OWNERS new file mode 100644 index 0000000000..73ab6a21c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/OWNERS @@ -0,0 +1,2 @@ +assignees: + - saad-ali diff --git a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go new file mode 100644 index 0000000000..fb8c125c5b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go @@ -0,0 +1,120 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exponentialbackoff contains logic for implementing exponential +// backoff for GoRoutineMap and NestedPendingOperations. +package exponentialbackoff + +import ( + "fmt" + "time" +) + +const ( + // initialDurationBeforeRetry is the amount of time after an error occurs + // that GoroutineMap will refuse to allow another operation to start with + // the same target (if exponentialBackOffOnError is enabled). Each + // successive error results in a wait 2x times the previous. + initialDurationBeforeRetry time.Duration = 500 * time.Millisecond + + // maxDurationBeforeRetry is the maximum amount of time that + // durationBeforeRetry will grow to due to exponential backoff. + maxDurationBeforeRetry time.Duration = 2 * time.Minute +) + +// ExponentialBackoff contains the last occurrence of an error and the duration +// that retries are not permitted. +type ExponentialBackoff struct { + lastError error + lastErrorTime time.Time + durationBeforeRetry time.Duration +} + +// SafeToRetry returns an error if the durationBeforeRetry period for the given +// lastErrorTime has not yet expired. Otherwise it returns nil. 
+func (expBackoff *ExponentialBackoff) SafeToRetry(operationName string) error { + if time.Since(expBackoff.lastErrorTime) <= expBackoff.durationBeforeRetry { + return NewExponentialBackoffError(operationName, *expBackoff) + } + + return nil +} + +func (expBackoff *ExponentialBackoff) Update(err *error) { + if expBackoff.durationBeforeRetry == 0 { + expBackoff.durationBeforeRetry = initialDurationBeforeRetry + } else { + expBackoff.durationBeforeRetry = 2 * expBackoff.durationBeforeRetry + if expBackoff.durationBeforeRetry > maxDurationBeforeRetry { + expBackoff.durationBeforeRetry = maxDurationBeforeRetry + } + } + + expBackoff.lastError = *err + expBackoff.lastErrorTime = time.Now() +} + +func (expBackoff *ExponentialBackoff) GenerateNoRetriesPermittedMsg( + operationName string) string { + return fmt.Sprintf("Operation for %q failed. No retries permitted until %v (durationBeforeRetry %v). Error: %v", + operationName, + expBackoff.lastErrorTime.Add(expBackoff.durationBeforeRetry), + expBackoff.durationBeforeRetry, + expBackoff.lastError) +} + +// NewExponentialBackoffError returns a new instance of ExponentialBackoff error. +func NewExponentialBackoffError( + operationName string, expBackoff ExponentialBackoff) error { + return exponentialBackoffError{ + operationName: operationName, + expBackoff: expBackoff, + } +} + +// IsExponentialBackoff returns true if an error returned from GoroutineMap +// indicates that a new operation can not be started because +// exponentialBackOffOnError is enabled and a previous operation with the same +// operation failed within the durationBeforeRetry period. 
+func IsExponentialBackoff(err error) bool { + switch err.(type) { + case exponentialBackoffError: + return true + default: + return false + } +} + +// exponentialBackoffError is the error returned from GoroutineMap when +// a new operation can not be started because exponentialBackOffOnError is +// enabled and a previous operation with the same operation failed within the +// durationBeforeRetry period. +type exponentialBackoffError struct { + operationName string + expBackoff ExponentialBackoff +} + +var _ error = exponentialBackoffError{} + +func (err exponentialBackoffError) Error() string { + return fmt.Sprintf( + "Failed to create operation with name %q. An operation with that name failed at %v. No retries permitted until %v (%v). Last error: %q.", + err.operationName, + err.expBackoff.lastErrorTime, + err.expBackoff.lastErrorTime.Add(err.expBackoff.durationBeforeRetry), + err.expBackoff.durationBeforeRetry, + err.expBackoff.lastError) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go index af9c1eeb84..3ca5f68701 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go +++ b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go @@ -23,18 +23,18 @@ package goroutinemap import ( "fmt" - "runtime" "sync" "time" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" k8sRuntime "k8s.io/kubernetes/pkg/util/runtime" ) const ( // initialDurationBeforeRetry is the amount of time after an error occurs // that GoRoutineMap will refuse to allow another operation to start with - // the same operationName (if exponentialBackOffOnError is enabled). Each + // the same operation name (if exponentialBackOffOnError is enabled). Each // successive error results in a wait 2x times the previous. 
initialDurationBeforeRetry time.Duration = 500 * time.Millisecond @@ -45,12 +45,13 @@ const ( // GoRoutineMap defines the supported set of operations. type GoRoutineMap interface { - // Run adds operationName to the list of running operations and spawns a new - // go routine to execute the operation. If an operation with the same name - // already exists, an error is returned. Once the operation is complete, the - // go routine is terminated and the operationName is removed from the list - // of executing operations allowing a new operation to be started with the - // same name without error. + // Run adds operation name to the list of running operations and spawns a + // new go routine to execute the operation. + // If an operation with the same operation name already exists, an + // AlreadyExists or ExponentialBackoff error is returned. + // Once the operation is complete, the go routine is terminated and the + // operation name is removed from the list of executing operations allowing + // a new operation to be started with the same operation name without error. Run(operationName string, operationFunc func() error) error // Wait blocks until all operations are completed. This is typically @@ -61,65 +62,72 @@ type GoRoutineMap interface { // NewGoRoutineMap returns a new instance of GoRoutineMap. 
func NewGoRoutineMap(exponentialBackOffOnError bool) GoRoutineMap { - return &goRoutineMap{ + g := &goRoutineMap{ operations: make(map[string]operation), exponentialBackOffOnError: exponentialBackOffOnError, + lock: &sync.Mutex{}, } + + g.cond = sync.NewCond(g.lock) + return g } type goRoutineMap struct { operations map[string]operation exponentialBackOffOnError bool - wg sync.WaitGroup - sync.Mutex + cond *sync.Cond + lock *sync.Mutex } type operation struct { - operationPending bool - lastError error - lastErrorTime time.Time - durationBeforeRetry time.Duration + operationPending bool + expBackoff exponentialbackoff.ExponentialBackoff } -func (grm *goRoutineMap) Run(operationName string, operationFunc func() error) error { - grm.Lock() - defer grm.Unlock() +func (grm *goRoutineMap) Run( + operationName string, + operationFunc func() error) error { + grm.lock.Lock() + defer grm.lock.Unlock() + existingOp, exists := grm.operations[operationName] if exists { // Operation with name exists if existingOp.operationPending { - return newAlreadyExistsError(operationName) + return NewAlreadyExistsError(operationName) } - if time.Since(existingOp.lastErrorTime) <= existingOp.durationBeforeRetry { - return newExponentialBackoffError(operationName, existingOp) + if err := existingOp.expBackoff.SafeToRetry(operationName); err != nil { + return err } } grm.operations[operationName] = operation{ - operationPending: true, - lastError: existingOp.lastError, - lastErrorTime: existingOp.lastErrorTime, - durationBeforeRetry: existingOp.durationBeforeRetry, + operationPending: true, + expBackoff: existingOp.expBackoff, } - grm.wg.Add(1) go func() (err error) { // Handle unhandled panics (very unlikely) defer k8sRuntime.HandleCrash() // Handle completion of and error, if any, from operationFunc() defer grm.operationComplete(operationName, &err) // Handle panic, if any, from operationFunc() - defer recoverFromPanic(operationName, &err) + defer k8sRuntime.RecoverFromPanic(&err) return 
operationFunc() }() return nil } -func (grm *goRoutineMap) operationComplete(operationName string, err *error) { - defer grm.wg.Done() - grm.Lock() - defer grm.Unlock() +func (grm *goRoutineMap) operationComplete( + operationName string, err *error) { + // Defer operations are executed in Last-In is First-Out order. In this case + // the lock is acquired first when operationCompletes begins, and is + // released when the method finishes, after the lock is released cond is + // signaled to wake waiting goroutine. + defer grm.cond.Signal() + grm.lock.Lock() + defer grm.lock.Unlock() if *err == nil || !grm.exponentialBackOffOnError { // Operation completed without error, or exponentialBackOffOnError disabled @@ -133,70 +141,33 @@ func (grm *goRoutineMap) operationComplete(operationName string, err *error) { } else { // Operation completed with error and exponentialBackOffOnError Enabled existingOp := grm.operations[operationName] - if existingOp.durationBeforeRetry == 0 { - existingOp.durationBeforeRetry = initialDurationBeforeRetry - } else { - existingOp.durationBeforeRetry = 2 * existingOp.durationBeforeRetry - if existingOp.durationBeforeRetry > maxDurationBeforeRetry { - existingOp.durationBeforeRetry = maxDurationBeforeRetry - } - } - existingOp.lastError = *err - existingOp.lastErrorTime = time.Now() + existingOp.expBackoff.Update(err) existingOp.operationPending = false - grm.operations[operationName] = existingOp // Log error - glog.Errorf("Operation for %q failed. No retries permitted until %v (durationBeforeRetry %v). 
error: %v", - operationName, - existingOp.lastErrorTime.Add(existingOp.durationBeforeRetry), - existingOp.durationBeforeRetry, - *err) + glog.Errorf("%v", + existingOp.expBackoff.GenerateNoRetriesPermittedMsg(operationName)) } } func (grm *goRoutineMap) Wait() { - grm.wg.Wait() -} + grm.lock.Lock() + defer grm.lock.Unlock() -func recoverFromPanic(operationName string, err *error) { - if r := recover(); r != nil { - callers := "" - for i := 0; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - callers = callers + fmt.Sprintf("%v:%v\n", file, line) - } - *err = fmt.Errorf( - "operation for %q recovered from panic %q. (err=%v) Call stack:\n%v", - operationName, - r, - *err, - callers) + for len(grm.operations) > 0 { + grm.cond.Wait() } } -// alreadyExistsError is the error returned when NewGoRoutine() detects that -// an operation with the given name is already running. -type alreadyExistsError struct { - operationName string -} - -var _ error = alreadyExistsError{} - -func (err alreadyExistsError) Error() string { - return fmt.Sprintf("Failed to create operation with name %q. An operation with that name is already executing.", err.operationName) -} - -func newAlreadyExistsError(operationName string) error { +// NewAlreadyExistsError returns a new instance of AlreadyExists error. +func NewAlreadyExistsError(operationName string) error { return alreadyExistsError{operationName} } -// IsAlreadyExists returns true if an error returned from NewGoRoutine indicates -// that operation with the same name already exists. +// IsAlreadyExists returns true if an error returned from GoRoutineMap indicates +// a new operation can not be started because an operation with the same +// operation name is already executing. 
func IsAlreadyExists(err error) bool { switch err.(type) { case alreadyExistsError: @@ -206,42 +177,17 @@ func IsAlreadyExists(err error) bool { } } -// exponentialBackoffError is the error returned when NewGoRoutine() detects -// that the previous operation for given name failed less then -// durationBeforeRetry. -type exponentialBackoffError struct { +// alreadyExistsError is the error returned by GoRoutineMap when a new operation +// can not be started because an operation with the same operation name is +// already executing. +type alreadyExistsError struct { operationName string - failedOp operation } -var _ error = exponentialBackoffError{} +var _ error = alreadyExistsError{} -func (err exponentialBackoffError) Error() string { +func (err alreadyExistsError) Error() string { return fmt.Sprintf( - "Failed to create operation with name %q. An operation with that name failed at %v. No retries permitted until %v (%v). Last error: %q.", - err.operationName, - err.failedOp.lastErrorTime, - err.failedOp.lastErrorTime.Add(err.failedOp.durationBeforeRetry), - err.failedOp.durationBeforeRetry, - err.failedOp.lastError) -} - -func newExponentialBackoffError( - operationName string, failedOp operation) error { - return exponentialBackoffError{ - operationName: operationName, - failedOp: failedOp, - } -} - -// IsExponentialBackoff returns true if an error returned from NewGoRoutine() -// indicates that the previous operation for given name failed less then -// durationBeforeRetry. -func IsExponentialBackoff(err error) bool { - switch err.(type) { - case exponentialBackoffError: - return true - default: - return false - } + "Failed to create operation with name %q. 
An operation with that name is already executing.", + err.operationName) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 66959697c5..59e5f3341a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -107,7 +107,8 @@ func doMount(source string, target string, fstype string, options []string) erro command := exec.Command("mount", mountArgs...) output, err := command.CombinedOutput() if err != nil { - return fmt.Errorf("Mount failed: %v\nMounting arguments: %s %s %s %v\nOutput: %s\n", + glog.Errorf("Mount failed: %v\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, source, target, fstype, options, string(output)) + return fmt.Errorf("mount failed: %v\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, source, target, fstype, options, string(output)) } return err @@ -249,6 +250,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, options = append(options, "defaults") // Run fsck on the disk to fix repairable issues + glog.V(4).Infof("Checking for issues with fsck on disk: %s", source) args := []string{"-a", source} cmd := mounter.Runner.Command("fsck", args...) out, err := cmd.CombinedOutput() @@ -267,6 +269,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, } // Try to mount the disk + glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) err = mounter.Interface.Mount(source, target, fstype, options) if err != nil { // It is possible that this disk is not formatted. 
Double check using diskLooksUnformatted @@ -281,12 +284,15 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if fstype == "ext4" || fstype == "ext3" { args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", source} } + glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) cmd := mounter.Runner.Command("mkfs."+fstype, args...) _, err := cmd.CombinedOutput() if err == nil { // the disk has been formatted successfully try to mount it again. + glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) return mounter.Interface.Mount(source, target, fstype, options) } + glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) return err } } @@ -297,6 +303,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { args := []string{"-nd", "-o", "FSTYPE", disk} cmd := mounter.Runner.Command("lsblk", args...) + glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) dataOut, err := cmd.CombinedOutput() output := strings.TrimSpace(string(dataOut)) @@ -304,6 +311,7 @@ func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, erro // an error if so. if err != nil { + glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) return false, err } diff --git a/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go b/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go index f404d25d19..641846a2d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go @@ -18,8 +18,9 @@ package runtime import ( "fmt" - "github.com/golang/glog" "runtime" + + "github.com/golang/glog" ) // For testing, bypass HandleCrash. 
@@ -47,6 +48,11 @@ func HandleCrash(additionalHandlers ...func(interface{})) { // logPanic logs the caller tree when a panic occurs. func logPanic(r interface{}) { + callers := getCallers(r) + glog.Errorf("Recovered from panic: %#v (%v)\n%v", r, r, callers) +} + +func getCallers(r interface{}) string { callers := "" for i := 0; true; i++ { _, file, line, ok := runtime.Caller(i) @@ -55,7 +61,8 @@ func logPanic(r interface{}) { } callers = callers + fmt.Sprintf("%v:%v\n", file, line) } - glog.Errorf("Recovered from panic: %#v (%v)\n%v", r, r, callers) + + return callers } // ErrorHandlers is a list of functions which will be invoked when an unreturnable @@ -92,3 +99,18 @@ func GetCaller() string { } return f.Name() } + +// RecoverFromPanic replaces the specified error with an error containing the +// original error, and the call tree when a panic occurs. This enables error +// handlers to handle errors and panics the same way. +func RecoverFromPanic(err *error) { + if r := recover(); r != nil { + callers := getCallers(r) + + *err = fmt.Errorf( + "recovered from panic %q. (err=%v) Call stack:\n%v", + r, + *err, + callers) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go index 68d82ad506..0b41ab88f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. 
- gitVersion string = "v1.3.3+$Format:%h$" + gitVersion string = "v1.3.4+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go index 43ba109794..fba3ed1f03 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go @@ -147,6 +147,7 @@ func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume. func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil { + glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") } return &awsElasticBlockStoreDeleter{ @@ -242,6 +243,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -263,17 +265,17 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + glog.Errorf("failed to unmount %s: %v", dir, mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { 
- glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) return err } if !notMnt { @@ -283,6 +285,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error } } os.Remove(dir) + glog.Errorf("Mount of disk %s failed: %v", dir, err) return err } @@ -290,6 +293,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error volume.SetVolumeOwnership(b, fsGroup) } + glog.V(4).Infof("Successfully mounted %s", dir) return nil } @@ -305,10 +309,12 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), "mounts") rel, err := filepath.Rel(basePath, globalPath) if err != nil { + glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err) return "", err } if strings.Contains(rel, "../") { - return "", fmt.Errorf("Unexpected mount path: " + globalPath) + glog.Errorf("Unexpected mount path: %s", globalPath) + return "", fmt.Errorf("unexpected mount path: " + globalPath) } // Reverse the :// replacement done in makeGlobalPDPath volumeID := rel @@ -391,6 +397,7 @@ var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{} func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) { volumeID, sizeGB, labels, err := c.manager.CreateVolume(c) if err != nil { + glog.Errorf("Provision failed: %v", err) return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go index c3097f31c0..76ebe72adc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go @@ -166,8 +166,8 @@ func pathExists(path string) (bool, error) { } // Return cloud provider -func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.AWSCloud, error) { - awsCloudProvider, ok := 
cloudProvider.(*aws.AWSCloud) +func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error) { + awsCloudProvider, ok := cloudProvider.(*aws.Cloud) if !ok || awsCloudProvider == nil { return nil, fmt.Errorf("Failed to get AWS Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go index 1f3e32b15d..e161f5a4c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go @@ -277,7 +277,7 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { // TODO: handle failed mounts here. notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err) + glog.Errorf("Cannot validate mount point: %s %v", dir, err) return err } if !notmnt { @@ -299,6 +299,7 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } // Perform a bind mount to the full path to allow duplicate mounts of the same PD. + glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options) err = b.mounter.Mount(globalPDPath, dir, "", options) if err != nil { glog.V(4).Infof("Mount failed: %v", err) @@ -326,6 +327,7 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { os.Remove(dir) // TODO: we should really eject the attach/detach out into its own control loop. 
detachDiskLogError(b.cinderVolume) + glog.Errorf("Failed to mount %s: %v", dir, err) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go b/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go index bd6c3407ec..baabc7dde7 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go @@ -60,7 +60,10 @@ func (plugin *configMapPlugin) GetVolumeName(spec *volume.Spec) (string, error) return "", fmt.Errorf("Spec does not reference a ConfigMap volume type") } - return volumeSource.Name, nil + return fmt.Sprintf( + "%v/%v", + spec.Name(), + volumeSource.Name), nil } func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool { @@ -118,12 +121,14 @@ func (sv *configMapVolume) GetAttributes() volume.Attributes { } } -// This is the spec for the volume that this plugin wraps. -var wrappedVolumeSpec = volume.Spec{ - // This should be on a tmpfs instead of the local disk; the problem is - // charging the memory for the tmpfs to the right cgroup. We should make - // this a tmpfs when we can do the accounting correctly. - Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, +func wrappedVolumeSpec() volume.Spec { + // This is the spec for the volume that this plugin wraps. + return volume.Spec{ + // This should be on a tmpfs instead of the local disk; the problem is + // charging the memory for the tmpfs to the right cgroup. We should make + // this a tmpfs when we can do the accounting correctly. + Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + } } func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error { @@ -134,7 +139,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) // Wrap EmptyDir, let it do the setup. 
- wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts) + wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts) if err != nil { return err } @@ -233,7 +238,7 @@ func (c *configMapVolumeUnmounter) TearDownAt(dir string) error { glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) // Wrap EmptyDir, let it do the teardown. - wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID) + wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index 6b5fbf8ab0..e5c58c4b68 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -49,8 +49,10 @@ type downwardAPIPlugin struct { var _ volume.VolumePlugin = &downwardAPIPlugin{} -var wrappedVolumeSpec = volume.Spec{ - Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}}, +func wrappedVolumeSpec() volume.Spec { + return volume.Spec{ + Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}}, + } } func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error { @@ -144,7 +146,7 @@ func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error { func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir) // Wrap EmptyDir. 
Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting - wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, b.pod, *b.opts) + wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts) if err != nil { glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err @@ -233,7 +235,7 @@ func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error { glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) // Wrap EmptyDir, let it do the teardown. - wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID) + wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume.go b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume.go index 1fa75c742a..6ea0ef7c64 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/flexvolume.go @@ -265,7 +265,7 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { notmnt, err := f.blockDeviceMounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mountpoint: %s", dir) + glog.Errorf("Cannot validate mount point: %s %v", dir, err) return err } if !notmnt { @@ -290,18 +290,20 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { f.options[optionKeySecret+"/"+name] = secret } + glog.V(4).Infof("attempting to attach volume: %s with options %v", f.volName, f.options) device, err := f.manager.attach(f) if err != nil { if !isCmdNotSupportedErr(err) { - glog.Errorf("Failed to attach volume: %s", f.volName) + glog.Errorf("failed to attach volume: %s", f.volName) return err } // Attach not supported or required. Continue to mount. 
} + glog.V(4).Infof("attempting to mount volume: %s", f.volName) if err := f.manager.mount(f, device, dir); err != nil { if !isCmdNotSupportedErr(err) { - glog.Errorf("Failed to mount volume: %s", f.volName) + glog.Errorf("failed to mount volume: %s", f.volName) return err } options := make([]string, 0) @@ -318,13 +320,15 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { os.MkdirAll(dir, 0750) // Mount not supported by driver. Use core mounting logic. + glog.V(4).Infof("attempting to mount the volume: %s to device: %s", f.volName, device) err = f.blockDeviceMounter.Mount(string(device), dir, f.fsType, options) if err != nil { - glog.Errorf("Failed to mount the volume: %s, device: %s, error: %s", f.volName, device, err.Error()) + glog.Errorf("failed to mount the volume: %s to device: %s, error: %v", f.volName, device, err) return err } } + glog.V(4).Infof("Successfully mounted volume: %s on device: %s", f.volName, device) return nil } @@ -370,7 +374,7 @@ func (f *flexVolumeUnmounter) TearDownAt(dir string) error { } // Unmount not supported by the driver. Use core unmount logic. if err := f.mounter.Unmount(dir); err != nil { - glog.Errorf("Failed to unmount volume: %s, error: %s", dir, err.Error()) + glog.Errorf("Failed to unmount volume: %s, error: %v", dir, err) return err } } @@ -378,7 +382,7 @@ func (f *flexVolumeUnmounter) TearDownAt(dir string) error { if refCount == 1 { if err := f.manager.detach(f, device); err != nil { if !isCmdNotSupportedErr(err) { - glog.Errorf("Failed to teardown volume: %s, error: %s", dir, err.Error()) + glog.Errorf("Failed to teardown volume: %s, error: %v", dir, err) return err } // Teardown not supported by driver. Unmount is good enough. 
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go index b165f34e01..bc6cedc1ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go @@ -234,6 +234,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) glog.V(4).Infof("PersistentDisk set up: %s %v %v, pd name %v readOnly %v", dir, !notMnt, err, b.pdName, b.readOnly) if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -241,6 +242,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { + glog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -251,6 +253,8 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName) + glog.V(4).Infof("attempting to mount %s", dir) + err = b.mounter.Mount(globalPDPath, dir, "", options) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) @@ -275,6 +279,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } } os.Remove(dir) + glog.Errorf("Mount of disk %s failed: %v", dir, err) return err } @@ -282,6 +287,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(b, fsGroup) } + glog.V(4).Infof("Successfully mounted %s", dir) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go b/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go index 3662222a01..2bc17f4091 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go @@ -41,8 +41,10 @@ type gitRepoPlugin struct { var _ volume.VolumePlugin = &gitRepoPlugin{} -var 
wrappedVolumeSpec = volume.Spec{ - Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, +func wrappedVolumeSpec() volume.Spec { + return volume.Spec{ + Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + } } const ( @@ -61,7 +63,7 @@ func (plugin *gitRepoPlugin) GetPluginName() string { func (plugin *gitRepoPlugin) GetVolumeName(spec *volume.Spec) (string, error) { volumeSource, _ := getVolumeSource(spec) if volumeSource == nil { - return "", fmt.Errorf("Spec does not reference a GCE volume type") + return "", fmt.Errorf("Spec does not reference a Git repo volume type") } return fmt.Sprintf( @@ -155,7 +157,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } // Wrap EmptyDir, let it do the setup. - wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, b.opts) + wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, b.opts) if err != nil { return err } @@ -237,7 +239,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error { func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error { // Wrap EmptyDir, let it do the teardown. 
- wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID) + wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index 790e9482b4..9d5a622bc9 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -210,9 +210,10 @@ func (b *rbdMounter) SetUp(fsGroup *int64) error { func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls + glog.V(4).Infof("rbd: attempting to SetUp and mount %s", dir) err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("rbd: failed to setup") + glog.Errorf("rbd: failed to setup mount %s %v", dir, err) } return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go index e99229ba49..ec909cb699 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go @@ -45,8 +45,10 @@ type secretPlugin struct { var _ volume.VolumePlugin = &secretPlugin{} -var wrappedVolumeSpec = volume.Spec{ - Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}}, +func wrappedVolumeSpec() volume.Spec { + return volume.Spec{ + Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}}, + } } func getPath(uid types.UID, volName string, host volume.VolumeHost) string { @@ -150,7 +152,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) // Wrap EmptyDir, let it do the setup. 
- wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts) + wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts) if err != nil { return err } @@ -249,7 +251,7 @@ func (c *secretVolumeUnmounter) TearDownAt(dir string) error { glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) // Wrap EmptyDir, let it do the teardown. - wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID) + wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/OWNERS new file mode 100644 index 0000000000..73ab6a21c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/OWNERS @@ -0,0 +1,2 @@ +assignees: + - saad-ali diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go new file mode 100644 index 0000000000..0ba530d284 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go @@ -0,0 +1,287 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Package nestedpendingoperations is a modified implementation of +pkg/util/goroutinemap. It implements a data structure for managing go routines +by volume/pod name. It prevents the creation of new go routines if an existing +go routine for the volume already exists. It also allows multiple operations to +execute in parallel for the same volume as long as they are operating on +different pods. +*/ +package nestedpendingoperations + +import ( + "fmt" + "sync" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" + k8sRuntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/volume/util/types" +) + +const ( + // emptyUniquePodName is a UniquePodName for empty string. + emptyUniquePodName types.UniquePodName = types.UniquePodName("") +) + +// NestedPendingOperations defines the supported set of operations. +type NestedPendingOperations interface { + // Run adds the concatenation of volumeName and podName to the list of + // running operations and spawns a new go routine to execute operationFunc. + // If an operation with the same volumeName and same or empty podName + // exists, an AlreadyExists or ExponentialBackoff error is returned. + // This enables multiple operations to execute in parallel for the same + // volumeName as long as they have different podName. + // Once the operation is complete, the go routine is terminated and the + // concatenation of volumeName and podName is removed from the list of + // executing operations allowing a new operation to be started with the + // volumeName without error. + Run(volumeName api.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error) error + + // Wait blocks until all operations are completed. This is typically + // necessary during tests - the test should wait until all operations finish + // and evaluate results after that. 
+ Wait() +} + +// NewNestedPendingOperations returns a new instance of NestedPendingOperations. +func NewNestedPendingOperations(exponentialBackOffOnError bool) NestedPendingOperations { + g := &nestedPendingOperations{ + operations: []operation{}, + exponentialBackOffOnError: exponentialBackOffOnError, + lock: &sync.Mutex{}, + } + g.cond = sync.NewCond(g.lock) + return g +} + +type nestedPendingOperations struct { + operations []operation + exponentialBackOffOnError bool + cond *sync.Cond + lock *sync.Mutex +} + +type operation struct { + volumeName api.UniqueVolumeName + podName types.UniquePodName + operationPending bool + expBackoff exponentialbackoff.ExponentialBackoff +} + +func (grm *nestedPendingOperations) Run( + volumeName api.UniqueVolumeName, + podName types.UniquePodName, + operationFunc func() error) error { + grm.lock.Lock() + defer grm.lock.Unlock() + + var previousOp operation + opExists := false + previousOpIndex := -1 + for previousOpIndex, previousOp = range grm.operations { + if previousOp.volumeName != volumeName { + // No match, keep searching + continue + } + + if previousOp.podName != emptyUniquePodName && + podName != emptyUniquePodName && + previousOp.podName != podName { + // No match, keep searching + continue + } + + // Match + opExists = true + break + } + + if opExists { + // Operation already exists + if previousOp.operationPending { + // Operation is pending + operationName := getOperationName(volumeName, podName) + return NewAlreadyExistsError(operationName) + } + + operationName := getOperationName(volumeName, podName) + if err := previousOp.expBackoff.SafeToRetry(operationName); err != nil { + return err + } + + // Update existing operation to mark as pending. 
+ grm.operations[previousOpIndex].operationPending = true + grm.operations[previousOpIndex].volumeName = volumeName + grm.operations[previousOpIndex].podName = podName + } else { + // Create a new operation + grm.operations = append(grm.operations, + operation{ + operationPending: true, + volumeName: volumeName, + podName: podName, + expBackoff: exponentialbackoff.ExponentialBackoff{}, + }) + } + + go func() (err error) { + // Handle unhandled panics (very unlikely) + defer k8sRuntime.HandleCrash() + // Handle completion of and error, if any, from operationFunc() + defer grm.operationComplete(volumeName, podName, &err) + // Handle panic, if any, from operationFunc() + defer k8sRuntime.RecoverFromPanic(&err) + return operationFunc() + }() + + return nil +} + +func (grm *nestedPendingOperations) getOperation( + volumeName api.UniqueVolumeName, + podName types.UniquePodName) (uint, error) { + // Assumes lock has been acquired by caller. + + for i, op := range grm.operations { + if op.volumeName == volumeName && + op.podName == podName { + return uint(i), nil + } + } + + logOperationName := getOperationName(volumeName, podName) + return 0, fmt.Errorf("Operation %q not found.", logOperationName) +} + +func (grm *nestedPendingOperations) deleteOperation( + // Assumes lock has been acquired by caller. + volumeName api.UniqueVolumeName, + podName types.UniquePodName) { + + opIndex := -1 + for i, op := range grm.operations { + if op.volumeName == volumeName && + op.podName == podName { + opIndex = i + break + } + } + + // Delete index without preserving order + grm.operations[opIndex] = grm.operations[len(grm.operations)-1] + grm.operations = grm.operations[:len(grm.operations)-1] +} + +func (grm *nestedPendingOperations) operationComplete( + volumeName api.UniqueVolumeName, podName types.UniquePodName, err *error) { + // Defer operations are executed in Last-In is First-Out order. 
In this case + // the lock is acquired first when operationCompletes begins, and is + // released when the method finishes, after the lock is released cond is + // signaled to wake waiting goroutine. + defer grm.cond.Signal() + grm.lock.Lock() + defer grm.lock.Unlock() + + if *err == nil || !grm.exponentialBackOffOnError { + // Operation completed without error, or exponentialBackOffOnError disabled + grm.deleteOperation(volumeName, podName) + if *err != nil { + // Log error + logOperationName := getOperationName(volumeName, podName) + glog.Errorf("operation %s failed with: %v", + logOperationName, + *err) + } + return + } + + // Operation completed with error and exponentialBackOffOnError Enabled + existingOpIndex, getOpErr := grm.getOperation(volumeName, podName) + if getOpErr != nil { + // Failed to find existing operation + logOperationName := getOperationName(volumeName, podName) + glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.", + logOperationName, + *err) + return + } + + grm.operations[existingOpIndex].expBackoff.Update(err) + grm.operations[existingOpIndex].operationPending = false + + // Log error + operationName := + getOperationName(volumeName, podName) + glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff. + GenerateNoRetriesPermittedMsg(operationName)) +} + +func (grm *nestedPendingOperations) Wait() { + grm.lock.Lock() + defer grm.lock.Unlock() + + for len(grm.operations) > 0 { + grm.cond.Wait() + } +} + +func getOperationName( + volumeName api.UniqueVolumeName, podName types.UniquePodName) string { + podNameStr := "" + if podName != emptyUniquePodName { + podNameStr = fmt.Sprintf(" (%q)", podName) + } + + return fmt.Sprintf("%q%s", + volumeName, + podNameStr) +} + +// NewAlreadyExistsError returns a new instance of AlreadyExists error. 
+func NewAlreadyExistsError(operationName string) error { + return alreadyExistsError{operationName} +} + +// IsAlreadyExists returns true if an error returned from +// NestedPendingOperations indicates a new operation can not be started because +// an operation with the same operation name is already executing. +func IsAlreadyExists(err error) bool { + switch err.(type) { + case alreadyExistsError: + return true + default: + return false + } +} + +// alreadyExistsError is the error returned by NestedPendingOperations when a +// new operation can not be started because an operation with the same operation +// name is already executing. +type alreadyExistsError struct { + operationName string +} + +var _ error = alreadyExistsError{} + +func (err alreadyExistsError) Error() string { + return fmt.Sprintf( + "Failed to create operation with name %q. An operation with that name is already executing.", + err.operationName) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/OWNERS new file mode 100644 index 0000000000..73ab6a21c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/OWNERS @@ -0,0 +1,2 @@ +assignees: + - saad-ali diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index 30eebab13e..0bd9c79104 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -15,8 +15,9 @@ limitations under the License. */ // Package operationexecutor implements interfaces that enable execution of -// attach, detach, mount, and unmount operations with a goroutinemap so that -// more than one operation is never triggered on the same volume. 
+// attach, detach, mount, and unmount operations with a +// nestedpendingoperations so that more than one operation is never triggered +// on the same volume for the same pod. package operationexecutor import ( @@ -27,13 +28,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // OperationExecutor defines a set of operations for attaching, detaching, -// mounting, or unmounting a volume that are executed with a goroutinemap which +// mounting, or unmounting a volume that are executed with a NewNestedPendingOperations which // prevents more than one operation from being triggered on the same volume. // // These operations should be idempotent (for example, AttachVolume should @@ -105,7 +107,7 @@ func NewOperationExecutor( return &operationExecutor{ kubeClient: kubeClient, volumePluginMgr: volumePluginMgr, - pendingOperations: goroutinemap.NewGoRoutineMap( + pendingOperations: nestedpendingoperations.NewNestedPendingOperations( true /* exponentialBackOffOnError */), } } @@ -129,8 +131,14 @@ type ActualStateOfWorldMounterUpdater interface { // ActualStateOfWorldAttacherUpdater defines a set of operations updating the // actual state of the world cache after successful attach/detach/mount/unmount. type ActualStateOfWorldAttacherUpdater interface { - // Marks the specified volume as attached to the specified node - MarkVolumeAsAttached(volumeSpec *volume.Spec, nodeName string, devicePath string) error + // Marks the specified volume as attached to the specified node. If the + // volume name is supplied, that volume name will be used. If not, the + // volume name is computed using the result from querying the plugin. 
+ // + // TODO: in the future, we should be able to remove the volumeName + // argument to this method -- since it is used only for attachable + // volumes. See issue 29695. + MarkVolumeAsAttached(volumeName api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName, devicePath string) error // Marks the specified volume as detached from the specified node MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName string) @@ -323,7 +331,7 @@ type operationExecutor struct { // pendingOperations keeps track of pending attach and detach operations so // multiple operations are not started on the same volume - pendingOperations goroutinemap.GoRoutineMap + pendingOperations nestedpendingoperations.NestedPendingOperations } func (oe *operationExecutor) AttachVolume( @@ -336,7 +344,7 @@ func (oe *operationExecutor) AttachVolume( } return oe.pendingOperations.Run( - string(volumeToAttach.VolumeName), attachFunc) + volumeToAttach.VolumeName, "" /* podName */, attachFunc) } func (oe *operationExecutor) DetachVolume( @@ -350,7 +358,7 @@ func (oe *operationExecutor) DetachVolume( } return oe.pendingOperations.Run( - string(volumeToDetach.VolumeName), detachFunc) + volumeToDetach.VolumeName, "" /* podName */, detachFunc) } func (oe *operationExecutor) MountVolume( @@ -363,8 +371,16 @@ func (oe *operationExecutor) MountVolume( return err } + podName := volumetypes.UniquePodName("") + // TODO: remove this -- not necessary + if !volumeToMount.PluginIsAttachable { + // Non-attachable volume plugins can execute mount for multiple pods + // referencing the same volume in parallel + podName = volumehelper.GetUniquePodName(volumeToMount.Pod) + } + return oe.pendingOperations.Run( - string(volumeToMount.VolumeName), mountFunc) + volumeToMount.VolumeName, podName, mountFunc) } func (oe *operationExecutor) UnmountVolume( @@ -376,8 +392,12 @@ func (oe *operationExecutor) UnmountVolume( return err } + // All volume plugins can execute mount for multiple pods referencing the + // same 
volume in parallel + podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) + return oe.pendingOperations.Run( - string(volumeToUnmount.VolumeName), unmountFunc) + volumeToUnmount.VolumeName, podName, unmountFunc) } func (oe *operationExecutor) UnmountDevice( @@ -390,7 +410,7 @@ func (oe *operationExecutor) UnmountDevice( } return oe.pendingOperations.Run( - string(deviceToDetach.VolumeName), unmountDeviceFunc) + deviceToDetach.VolumeName, "" /* podName */, unmountDeviceFunc) } func (oe *operationExecutor) VerifyControllerAttachedVolume( @@ -404,7 +424,7 @@ func (oe *operationExecutor) VerifyControllerAttachedVolume( } return oe.pendingOperations.Run( - string(volumeToMount.VolumeName), verifyControllerAttachedVolumeFunc) + volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc) } func (oe *operationExecutor) generateAttachVolumeFunc( @@ -455,7 +475,7 @@ func (oe *operationExecutor) generateAttachVolumeFunc( // Update actual state of world addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) + api.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. return fmt.Errorf( @@ -894,12 +914,13 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc( // If the volume does not implement the attacher interface, it is // assumed to be attached and the the actual state of the world is // updated accordingly. + addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - volumeToMount.VolumeSpec, nodeName, volumeToMount.DevicePath) + volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. 
return fmt.Errorf( - "VerifyControllerAttachedVolume.MarkVolumeAsAttached failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v.", + "VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v.", volumeToMount.VolumeName, volumeToMount.VolumeSpec.Name(), volumeToMount.PodName, @@ -950,7 +971,7 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc( for _, attachedVolume := range node.Status.VolumesAttached { if attachedVolume.Name == volumeToMount.VolumeName { addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath) + api.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath) glog.Infof("Controller successfully attached volume %q (spec.Name: %q) pod %q (UID: %q) devicePath: %q", volumeToMount.VolumeName, volumeToMount.VolumeSpec.Name(), diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go b/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go index 9ddc363735..a761c1fffb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go @@ -52,6 +52,12 @@ func GetUniqueVolumeName(pluginName, volumeName string) api.UniqueVolumeName { return api.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName)) } +// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name +// for a non-attachable volume. +func GetUniqueVolumeNameForNonAttachableVolume(podName types.UniquePodName, volumePlugin volume.VolumePlugin, podSpecName string) api.UniqueVolumeName { + return api.UniqueVolumeName(fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, podSpecName)) +} + // GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique // name representing the volume defined in the specified volume spec. 
// This returned name can be used to uniquely reference the actual backing diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go index 67648058ed..915ea63a5a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go @@ -136,7 +136,7 @@ func (l *persistentVolumeLabel) getEBSVolumes() (aws.Volumes, error) { if err != nil || cloudProvider == nil { return nil, err } - awsCloudProvider, ok := cloudProvider.(*aws.AWSCloud) + awsCloudProvider, ok := cloudProvider.(*aws.Cloud) if !ok { // GetCloudProvider has gone very wrong return nil, fmt.Errorf("error retrieving AWS cloud provider")