Update kubernetes to v1.3.0-beta.2

pull/243/head
Jimmi Dyson 2016-06-29 10:44:08 +01:00
parent 3380a56971
commit 8e058cc73c
167 changed files with 9753 additions and 4496 deletions

Godeps/Godeps.json (generated, 1923 lines changed): diff suppressed because it is too large.

@ -197,24 +197,38 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
)
if dockerStorageDriver == devicemapperStorageDriver {
// If the storage drive is devicemapper, create and start a
// ThinPoolWatcher to monitor the size of container CoW layers with
// thin_ls.
dockerThinPoolName, err := dockerutil.DockerThinPoolName(*dockerInfo)
if err != nil {
return fmt.Errorf("couldn't find device mapper thin pool name: %v", err)
}
_, err := devicemapper.ThinLsBinaryPresent()
if err == nil {
// If the storage driver is devicemapper, create and start a
// ThinPoolWatcher to monitor the size of container CoW layers
// with thin_ls.
dockerThinPoolName, err := dockerutil.DockerThinPoolName(*dockerInfo)
if err != nil {
return fmt.Errorf("couldn't find device mapper thin pool name: %v", err)
}
dockerMetadataDevice, err := dockerutil.DockerMetadataDevice(*dockerInfo)
if err != nil {
return fmt.Errorf("couldn't determine devicemapper metadata device")
}
dockerMetadataDevice, err := dockerutil.DockerMetadataDevice(*dockerInfo)
if err != nil {
return fmt.Errorf("couldn't determine devicemapper metadata device: %v", err)
}
thinPoolWatcher = devicemapper.NewThinPoolWatcher(dockerThinPoolName, dockerMetadataDevice)
go thinPoolWatcher.Start()
thinPoolWatcher, err = devicemapper.NewThinPoolWatcher(dockerThinPoolName, dockerMetadataDevice)
if err != nil {
return fmt.Errorf("couldn't create thin pool watcher: %v", err)
}
go thinPoolWatcher.Start()
} else {
msg := []string{
"Couldn't locate thin_ls binary; not starting thin pool watcher.",
"Containers backed by thin pools will not show accurate usage.",
"err: %v",
}
glog.Errorf(strings.Join(msg, " "), err)
}
}
glog.Infof("registering Docker factory")
glog.Infof("Registering Docker factory")
f := &dockerFactory{
cgroupSubsystems: cgroupSubsystems,
client: client,
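
The net effect of this hunk: Register now probes for the thin_ls binary up front and, when it is missing, logs a warning and carries on instead of wiring up a ThinPoolWatcher. A minimal sketch of that guard pattern, with cadvisor's factory plumbing stubbed out (pool and device names are hypothetical):

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// locateThinLs stands in for devicemapper.ThinLsBinaryPresent.
func locateThinLs() (string, error) {
	return exec.LookPath("thin_ls")
}

// registerDevicemapperWatcher mirrors the control flow above: only construct
// and start the watcher when thin_ls exists; otherwise degrade gracefully.
func registerDevicemapperWatcher(poolName, metadataDevice string) error {
	if _, err := locateThinLs(); err != nil {
		log.Printf("Couldn't locate thin_ls binary; not starting thin pool watcher. "+
			"Containers backed by thin pools will not show accurate usage. err: %v", err)
		return nil
	}
	// NewThinPoolWatcher can now fail too, so its error would be checked here.
	fmt.Printf("watching thin pool %s (metadata device %s)\n", poolName, metadataDevice)
	return nil
}

func main() {
	if err := registerDevicemapperWatcher("docker-pool", "/dev/mapper/docker-pool_tmeta"); err != nil {
		log.Fatal(err)
	}
}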


@ -21,19 +21,26 @@ import (
"github.com/golang/glog"
)
// DmsetupClient is a low-level client for interacting with devicemapper via
// the dmsetup utility.
// DmsetupClient is a low-level client for interacting with device mapper via
// the `dmsetup` utility, which is provided by the `device-mapper` package.
type DmsetupClient interface {
// Table runs `dmsetup table` on the given device name and returns the
// output or an error.
Table(deviceName string) ([]byte, error)
// Message runs `dmsetup message` on the given device, passing the given
// message to the given sector, and returns the output or an error.
Message(deviceName string, sector int, message string) ([]byte, error)
// Status runs `dmsetup status` on the given device and returns the output
// or an error.
Status(deviceName string) ([]byte, error)
}
// NewDmSetupClient returns a new DmsetupClient.
func NewDmsetupClient() DmsetupClient {
return &defaultDmsetupClient{}
}
// defaultDmsetupClient implements the standard behavior for interacting with dmsetup.
// defaultDmsetupClient is a functional DmsetupClient
type defaultDmsetupClient struct{}
var _ DmsetupClient = &defaultDmsetupClient{}
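
The hunk only shows the interface and comment changes; defaultDmsetupClient's method bodies are outside it. A plausible sketch, assuming each method simply shells out to the dmsetup subcommand it is named after (the bodies below are illustrative, not the vendored code):

package devicemapper

import (
	"os/exec"
	"strconv"
)

type sketchDmsetupClient struct{}

// Table runs `dmsetup table <device>`.
func (*sketchDmsetupClient) Table(deviceName string) ([]byte, error) {
	return exec.Command("dmsetup", "table", deviceName).Output()
}

// Message runs `dmsetup message <device> <sector> <message>`.
func (*sketchDmsetupClient) Message(deviceName string, sector int, message string) ([]byte, error) {
	return exec.Command("dmsetup", "message", deviceName, strconv.Itoa(sector), message).Output()
}

// Status runs `dmsetup status <device>`.
func (*sketchDmsetupClient) Status(deviceName string) ([]byte, error) {
	return exec.Command("dmsetup", "status", deviceName).Output()
}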


@ -24,24 +24,38 @@ import (
"github.com/golang/glog"
)
// thinLsClient knows how to run a thin_ls very specific to CoW usage for containers.
// thinLsClient knows how to run a thin_ls very specific to CoW usage for
// containers.
type thinLsClient interface {
// ThinLs runs a thin ls on the given device, which is expected to be a
// metadata device. The caller must hold the metadata snapshot for the
// device.
ThinLs(deviceName string) (map[string]uint64, error)
}
func newThinLsClient() thinLsClient {
return &defaultThinLsClient{}
// newThinLsClient returns a thinLsClient or an error if the thin_ls binary
// couldn't be located.
func newThinLsClient() (thinLsClient, error) {
thinLsPath, err := ThinLsBinaryPresent()
if err != nil {
return nil, fmt.Errorf("error creating thin_ls client: %v", err)
}
return &defaultThinLsClient{thinLsPath}, nil
}
type defaultThinLsClient struct{}
// defaultThinLsClient is a functional thinLsClient
type defaultThinLsClient struct {
thinLsPath string
}
var _ thinLsClient = &defaultThinLsClient{}
func (*defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error) {
func (c *defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error) {
args := []string{"--no-headers", "-m", "-o", "DEV,EXCLUSIVE_BYTES", deviceName}
glog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " "))
output, err := exec.Command("thin_ls", args...).Output()
output, err := exec.Command(c.thinLsPath, args...).Output()
if err != nil {
return nil, fmt.Errorf("Error running command `thin_ls %v`: %v\noutput:\n\n%v", strings.Join(args, " "), err, string(output))
}
@ -49,7 +63,8 @@ func (*defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error)
return parseThinLsOutput(output), nil
}
// parseThinLsOutput parses the output returned by thin_ls to build a map of device id -> usage.
// parseThinLsOutput parses the output returned by thin_ls to build a map of
// device id -> usage.
func parseThinLsOutput(output []byte) map[string]uint64 {
cache := map[string]uint64{}
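
parseThinLsOutput is cut off by the hunk. Given the flags used above (--no-headers -m -o DEV,EXCLUSIVE_BYTES), thin_ls emits one "<device-id> <exclusive-bytes>" pair per line, so a parser in the same spirit might look like this (a sketch, not the vendored implementation):

package devicemapper

import (
	"strconv"
	"strings"
)

// parseThinLsOutputSketch builds a device-id -> exclusive-bytes map from
// `thin_ls --no-headers -m -o DEV,EXCLUSIVE_BYTES` output.
func parseThinLsOutputSketch(output []byte) map[string]uint64 {
	cache := map[string]uint64{}
	for _, line := range strings.Split(string(output), "\n") {
		fields := strings.Fields(line)
		if len(fields) != 2 {
			continue // skip blank or malformed lines
		}
		usage, err := strconv.ParseUint(fields[1], 10, 64)
		if err != nil {
			continue
		}
		cache[fields[0]] = usage
	}
	return cache
}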


@ -22,7 +22,8 @@ import (
"github.com/golang/glog"
)
// ThinPoolWatcher maintains a cache of device name -> usage stats for a devicemapper thin-pool using thin_ls.
// ThinPoolWatcher maintains a cache of device name -> usage stats for a
// devicemapper thin-pool using thin_ls.
type ThinPoolWatcher struct {
poolName string
metadataDevice string
@ -34,8 +35,14 @@ type ThinPoolWatcher struct {
thinLsClient thinLsClient
}
// NewThinPoolWatcher returns a new ThinPoolWatcher for the given devicemapper thin pool name and metadata device.
func NewThinPoolWatcher(poolName, metadataDevice string) *ThinPoolWatcher {
// NewThinPoolWatcher returns a new ThinPoolWatcher for the given devicemapper
// thin pool name and metadata device or an error.
func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, error) {
thinLsClient, err := newThinLsClient()
if err != nil {
return nil, fmt.Errorf("encountered error creating thin_ls client: %v", err)
}
return &ThinPoolWatcher{poolName: poolName,
metadataDevice: metadataDevice,
lock: &sync.RWMutex{},
@ -43,11 +50,11 @@ func NewThinPoolWatcher(poolName, metadataDevice string) *ThinPoolWatcher {
period: 15 * time.Second,
stopChan: make(chan struct{}),
dmsetup: NewDmsetupClient(),
thinLsClient: newThinLsClient(),
}
thinLsClient: thinLsClient,
}, nil
}
// Start starts the thin pool watcher.
// Start starts the ThinPoolWatcher.
func (w *ThinPoolWatcher) Start() {
err := w.Refresh()
if err != nil {
@ -72,6 +79,7 @@ func (w *ThinPoolWatcher) Start() {
}
}
// Stop stops the ThinPoolWatcher.
func (w *ThinPoolWatcher) Stop() {
close(w.stopChan)
}
@ -80,6 +88,7 @@ func (w *ThinPoolWatcher) Stop() {
func (w *ThinPoolWatcher) GetUsage(deviceId string) (uint64, error) {
w.lock.RLock()
defer w.lock.RUnlock()
v, ok := w.cache[deviceId]
if !ok {
return 0, fmt.Errorf("no cached value for usage of device %v", deviceId)
@ -115,7 +124,8 @@ func (w *ThinPoolWatcher) Refresh() error {
}
glog.Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup message'. It's not needed for thin pools.
// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
// message'. It's not needed for thin pools.
if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {
err = fmt.Errorf("error reserving metadata for thin-pool %v: %v output: %v", w.poolName, err, string(output))
return err
@ -144,7 +154,8 @@ const (
thinPoolDmsetupStatusHeldMetadataRoot = 6
)
// checkReservation checks to see whether the thin device is currently holding userspace metadata.
// checkReservation checks to see whether the thin device is currently holding
// userspace metadata.
func (w *ThinPoolWatcher) checkReservation(poolName string) (bool, error) {
glog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot")
output, err := w.dmsetup.Status(poolName)
@ -153,7 +164,8 @@ func (w *ThinPoolWatcher) checkReservation(poolName string) (bool, error) {
}
tokens := strings.Split(string(output), " ")
// Split returns the input as the last item in the result, adjust the number of tokens by one
// Split returns the input as the last item in the result, adjust the
// number of tokens by one
if len(tokens) != thinPoolDmsetupStatusTokens+1 {
return false, fmt.Errorf("unexpected output of dmsetup status command; expected 11 fields, got %v; output: %v", len(tokens), string(output))
}
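
Since NewThinPoolWatcher now returns an error, call sites gain one extra check. A hedged usage sketch (pool, metadata device, and thin device id are hypothetical; cadvisor derives the real ones from `docker info`):

package main

import (
	"fmt"

	"github.com/google/cadvisor/devicemapper"
)

func watchPool() error {
	watcher, err := devicemapper.NewThinPoolWatcher("docker-pool", "/dev/mapper/docker-pool_tmeta")
	if err != nil {
		return fmt.Errorf("couldn't create thin pool watcher: %v", err)
	}
	go watcher.Start()
	defer watcher.Stop()

	// GetUsage answers from the cache that Start refreshes every 15s;
	// right after startup the cache may still be empty, which is an error.
	usage, err := watcher.GetUsage("42")
	if err != nil {
		return err
	}
	fmt.Printf("thin device 42 holds %d exclusive bytes\n", usage)
	return nil
}

func main() {
	if err := watchPool(); err != nil {
		fmt.Println(err)
	}
}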

vendor/github.com/google/cadvisor/devicemapper/util.go (generated, vendored, new file, 60 lines)

@ -0,0 +1,60 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package devicemapper
import (
"fmt"
"os"
"path/filepath"
)
// ThinLsBinaryPresent returns the location of the thin_ls binary in the mount
// namespace cadvisor is running in or an error. The locations checked are:
//
// - /bin/
// - /usr/sbin/
// - /usr/bin/
//
// ThinLsBinaryPresent checks these paths relative to:
//
// 1. For non-containerized operation - `/`
// 2. For containerized operation - `/rootfs`
//
// The thin_ls binary is provided by the device-mapper-persistent-data
// package.
func ThinLsBinaryPresent() (string, error) {
var (
thinLsPath string
err error
)
for _, path := range []string{"/bin", "/usr/sbin/", "/usr/bin"} {
// try paths for non-containerized operation
// note: thin_ls is most likely a symlink to pdata_tools
thinLsPath = filepath.Join(path, "thin_ls")
_, err = os.Stat(thinLsPath)
if err == nil {
return thinLsPath, nil
}
// try paths for containerized operation
thinLsPath = filepath.Join("/rootfs", thinLsPath)
_, err = os.Stat(thinLsPath)
if err == nil {
return thinLsPath, nil
}
}
return "", fmt.Errorf("unable to find thin_ls binary")
}
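
Callers treat the returned path as the binary to exec, as the new thinLsClient does. A typical call site, sketched:

package main

import (
	"log"

	"github.com/google/cadvisor/devicemapper"
)

func main() {
	thinLsPath, err := devicemapper.ThinLsBinaryPresent()
	if err != nil {
		// thin_ls is optional: without it, CoW usage stats are simply skipped.
		log.Printf("thin_ls not found: %v", err)
		return
	}
	log.Printf("using thin_ls at %s", thinLsPath)
}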


@ -206,7 +206,8 @@ func InstCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) {
return 0, fmt.Errorf("cumulative stats decrease")
}
valueDelta := curValue - lastValue
return (valueDelta * 1e9) / timeDeltaNs, nil
// Use float64 to keep precision
return uint64(float64(valueDelta) / float64(timeDeltaNs) * 1e9), nil
}
total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
if err != nil {
@ -268,57 +269,3 @@ func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace s
specV2.Namespace = namespace
return specV2
}
func instCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) {
if last == nil {
return nil, nil
}
if !cur.Timestamp.After(last.Timestamp) {
return nil, fmt.Errorf("container stats move backwards in time")
}
if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
return nil, fmt.Errorf("different number of cpus")
}
timeDelta := cur.Timestamp.Sub(last.Timestamp)
if timeDelta <= 100*time.Millisecond {
return nil, fmt.Errorf("time delta unexpectedly small")
}
// Nanoseconds to gain precision and avoid having zero seconds if the
// difference between the timestamps is just under a second
timeDeltaNs := uint64(timeDelta.Nanoseconds())
convertToRate := func(lastValue, curValue uint64) (uint64, error) {
if curValue < lastValue {
return 0, fmt.Errorf("cumulative stats decrease")
}
valueDelta := curValue - lastValue
return (valueDelta * 1e9) / timeDeltaNs, nil
}
total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
if err != nil {
return nil, err
}
percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
for i := range percpu {
var err error
percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
if err != nil {
return nil, err
}
}
user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
if err != nil {
return nil, err
}
system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
if err != nil {
return nil, err
}
return &CpuInstStats{
Usage: CpuInstUsage{
Total: total,
PerCpu: percpu,
User: user,
System: system,
},
}, nil
}
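
The convertToRate change above is easy to miss: the old integer form multiplies before dividing, so valueDelta * 1e9 wraps around uint64 once the CPU-time delta exceeds roughly 18.4s, which many-core machines reach easily within one sampling window. The float64 form divides first. A worked example with a hypothetical 64-core machine fully busy over a 10s window:

package main

import "fmt"

func main() {
	valueDelta := uint64(640000000000) // 640s of CPU time in ns (64 cores x 10s)
	timeDeltaNs := uint64(10000000000) // 10s window in ns

	// Old: the intermediate product 6.4e20 exceeds math.MaxUint64 (~1.8e19)
	// and wraps, yielding roughly 1.28e9 nanocores (~1.28 cores).
	overflowed := (valueDelta * 1e9) / timeDeltaNs

	// New: divide first in float64, then scale to nanocores.
	fixed := uint64(float64(valueDelta) / float64(timeDeltaNs) * 1e9)

	fmt.Println(overflowed) // wrapped garbage
	fmt.Println(fixed)      // 64000000000, i.e. 64 full cores in nanocores
}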


@ -1 +1 @@
0.23.3
0.23.4


@ -399,6 +399,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
cloud,
s.ClusterName,
nil, nil, nil,
s.VolumeConfiguration.EnableDynamicProvisioning,
)
volumeController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))


@ -75,6 +75,7 @@ func NewCMServer() *CMServer {
TerminatedPodGCThreshold: 12500,
VolumeConfiguration: componentconfig.VolumeConfiguration{
EnableHostPathProvisioning: false,
EnableDynamicProvisioning: true,
PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
MaximumRetry: 3,
MinimumTimeoutNFS: 300,
@ -125,13 +126,14 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.BoolVar(&s.VolumeConfiguration.EnableDynamicProvisioning, "enable-dynamic-provisioning", s.VolumeConfiguration.EnableDynamicProvisioning, "Enable dynamic provisioning for environments that support it.")
fs.StringVar(&s.VolumeConfiguration.FlexVolumePluginDir, "flex-volume-plugin-dir", s.VolumeConfiguration.FlexVolumePluginDir, "Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.")
fs.Int32Var(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
fs.Int32Var(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
fs.Int32Var(&s.DeletingPodsBurst, "deleting-pods-burst", 1, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
fs.Int32Var(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
"The number of retries for initial node registration. Retry interval equals node-sync-period.")
fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
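
EnableDynamicProvisioning therefore ships enabled by default (see NewCMServer above) and is threaded into the persistent volume controller in the earlier controllermanager.go hunk; operators opt out on the command line. A small pflag sketch of that behavior:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kube-controller-manager", pflag.ExitOnError)
	enableDynamicProvisioning := true // default from NewCMServer
	fs.BoolVar(&enableDynamicProvisioning, "enable-dynamic-provisioning", enableDynamicProvisioning,
		"Enable dynamic provisioning for environments that support it.")

	fs.Parse([]string{"--enable-dynamic-provisioning=false"})
	fmt.Println(enableDynamicProvisioning) // false: the operator opted out
}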


@ -85,6 +85,7 @@ func NewKubeletServer() *KubeletServer {
CgroupRoot: "",
ConfigureCBR0: false,
ContainerRuntime: "docker",
RuntimeRequestTimeout: unversioned.Duration{Duration: 2 * time.Minute},
CPUCFSQuota: true,
DockerExecHandlerName: "native",
EventBurst: 10,
@ -227,6 +228,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
fs.DurationVar(&s.RuntimeRequestTimeout.Duration, "runtime-request-timeout", s.RuntimeRequestTimeout.Duration, "Timeout of all runtime requests except long running request - pull, logs, exec and attach. When timeout exceeded, kubelet will cancel the request, throw out an error and retry later. Default: 2m0s")
fs.StringVar(&s.LockFilePath, "lock-file", s.LockFilePath, "<Warning: Alpha feature> The path to file for kubelet to use as a lock file.")
fs.BoolVar(&s.ExitOnLockContention, "exit-on-lock-contention", s.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.")
fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'.")
@ -269,5 +271,5 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.EvictionSoftGracePeriod, "eviction-soft-grace-period", s.EvictionSoftGracePeriod, "A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.")
fs.DurationVar(&s.EvictionPressureTransitionPeriod.Duration, "eviction-pressure-transition-period", s.EvictionPressureTransitionPeriod.Duration, "Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.")
fs.Int32Var(&s.EvictionMaxPodGracePeriod, "eviction-max-pod-grace-period", s.EvictionMaxPodGracePeriod, "Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. If negative, defer to pod specified value.")
fs.Int32Var(&s.PodsPerCore, "pods-per-core", s.PodsPerCore, "Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this caulcation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.")
fs.Int32Var(&s.PodsPerCore, "pods-per-core", s.PodsPerCore, "Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.")
}
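
The corrected --pods-per-core help text describes a minimum of max-pods and pods-per-core times the core count, with 0 disabling the per-core limit. A sketch of that documented calculation (not the kubelet's actual code path):

package main

import "fmt"

// allowedPods applies the rule from the flag help: the per-core limit caps
// the pod count, but never above max-pods; 0 disables the per-core limit.
func allowedPods(maxPods, podsPerCore, numCores int32) int32 {
	if podsPerCore <= 0 {
		return maxPods
	}
	if limit := podsPerCore * numCores; limit < maxPods {
		return limit
	}
	return maxPods
}

func main() {
	fmt.Println(allowedPods(110, 10, 4))  // 40: the per-core limit wins
	fmt.Println(allowedPods(110, 10, 64)) // 110: max-pods wins
	fmt.Println(allowedPods(110, 0, 64))  // 110: per-core limit disabled
}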


@ -210,9 +210,10 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
ConfigureCBR0: s.ConfigureCBR0,
ContainerManager: nil,
ContainerRuntime: s.ContainerRuntime,
RuntimeRequestTimeout: s.RuntimeRequestTimeout.Duration,
CPUCFSQuota: s.CPUCFSQuota,
DiskSpacePolicy: diskSpacePolicy,
DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration), // TODO(random-liu): Set RuntimeRequestTimeout for rkt.
RuntimeCgroups: s.RuntimeCgroups,
DockerExecHandler: dockerExecHandler,
EnableControllerAttachDetach: s.EnableControllerAttachDetach,
@ -410,10 +411,12 @@ func InitializeTLS(s *options.KubeletServer) (*server.TLSOptions, error) {
if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
s.TLSCertFile = path.Join(s.CertDirectory, "kubelet.crt")
s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "kubelet.key")
if err := crypto.GenerateSelfSignedCert(nodeutil.GetHostname(s.HostnameOverride), s.TLSCertFile, s.TLSPrivateKeyFile, nil, nil); err != nil {
return nil, fmt.Errorf("unable to generate self signed cert: %v", err)
if crypto.ShouldGenSelfSignedCerts(s.TLSCertFile, s.TLSPrivateKeyFile) {
if err := crypto.GenerateSelfSignedCert(nodeutil.GetHostname(s.HostnameOverride), s.TLSCertFile, s.TLSPrivateKeyFile, nil, nil); err != nil {
return nil, fmt.Errorf("unable to generate self signed cert: %v", err)
}
glog.V(4).Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
}
glog.V(4).Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
}
tlsOptions := &server.TLSOptions{
Config: &tls.Config{
@ -781,6 +784,7 @@ type KubeletConfig struct {
ConfigureCBR0 bool
ContainerManager cm.ContainerManager
ContainerRuntime string
RuntimeRequestTimeout time.Duration
CPUCFSQuota bool
DiskSpacePolicy kubelet.DiskSpacePolicy
DockerClient dockertools.DockerInterface
@ -921,6 +925,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.OSInterface,
kc.CgroupRoot,
kc.ContainerRuntime,
kc.RuntimeRequestTimeout,
kc.RktPath,
kc.RktAPIEndpoint,
kc.RktStage1Image,
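
The InitializeTLS hunk above makes self-signed cert generation conditional: crypto.ShouldGenSelfSignedCerts is consulted first, so certificates an operator has already placed in CertDirectory are no longer overwritten. A sketch of the kind of check such a helper presumably performs (the real function lives in Kubernetes' crypto util package and may differ):

package main

import (
	"fmt"
	"os"
)

// shouldGenSelfSignedCerts is an illustrative stand-in: only mint a
// self-signed pair when neither the cert nor the key file already exists.
func shouldGenSelfSignedCerts(certPath, keyPath string) bool {
	_, certErr := os.Stat(certPath)
	_, keyErr := os.Stat(keyPath)
	return os.IsNotExist(certErr) && os.IsNotExist(keyErr)
}

func main() {
	fmt.Println(shouldGenSelfSignedCerts("/var/run/kubernetes/kubelet.crt",
		"/var/run/kubernetes/kubelet.key"))
}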


@ -35,6 +35,7 @@ func init() {
if err := Scheme.AddGeneratedDeepCopyFuncs(
DeepCopy_api_AWSElasticBlockStoreVolumeSource,
DeepCopy_api_Affinity,
DeepCopy_api_AttachedVolume,
DeepCopy_api_AzureFileVolumeSource,
DeepCopy_api_Binding,
DeepCopy_api_Capabilities,
@ -228,6 +229,12 @@ func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) err
return nil
}
func DeepCopy_api_AttachedVolume(in AttachedVolume, out *AttachedVolume, c *conversion.Cloner) error {
out.Name = in.Name
out.DevicePath = in.DevicePath
return nil
}
func DeepCopy_api_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
@ -1603,13 +1610,24 @@ func DeepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Clone
}
if in.VolumesInUse != nil {
in, out := in.VolumesInUse, &out.VolumesInUse
*out = make([]UniqueDeviceName, len(in))
*out = make([]UniqueVolumeName, len(in))
for i := range in {
(*out)[i] = in[i]
}
} else {
out.VolumesInUse = nil
}
if in.VolumesAttached != nil {
in, out := in.VolumesAttached, &out.VolumesAttached
*out = make([]AttachedVolume, len(in))
for i := range in {
if err := DeepCopy_api_AttachedVolume(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.VolumesAttached = nil
}
return nil
}


@ -416,6 +416,14 @@ const (
// TaintsAnnotationKey represents the key of taints data (json serialized)
// in the Annotations of a Node.
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
// to all containers of a pod.
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
// to one container of a pod.
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
)
// GetAffinityFromPod gets the json serialized affinity data from Pod.Annotations
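
Both seccomp keys are read from Pod.Annotations. A sketch of how a pod might set them; the profile values ("docker/default", "unconfined") are the alpha-era conventions and are shown for illustration only:

package main

import "fmt"

func main() {
	annotations := map[string]string{
		// Pod-wide profile, applied to every container.
		"seccomp.security.alpha.kubernetes.io/pod": "docker/default",
		// Per-container override for a container named "sidecar".
		"container.seccomp.security.alpha.kubernetes.io/sidecar": "unconfined",
	}
	for k, v := range annotations {
		fmt.Printf("%s=%s\n", k, v)
	}
}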


@ -17,6 +17,7 @@ limitations under the License.
package api
import (
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/conversion"
@ -64,6 +65,10 @@ func ListMetaFor(obj runtime.Object) (*unversioned.ListMeta, error) {
return meta, err
}
func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj }
func (obj *ObjectReference) GetObjectKind() unversioned.ObjectKind { return obj }
// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows
// fast, direct access to metadata fields for API objects.
func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace }


@ -73,20 +73,10 @@ type ListMetaAccessor interface {
// List lets you work with list metadata from any of the versioned or
// internal API objects. Attempting to set or retrieve a field on an object that does
// not support that field will be a no-op and return a default value.
type List interface {
GetResourceVersion() string
SetResourceVersion(version string)
GetSelfLink() string
SetSelfLink(selfLink string)
}
type List unversioned.List
// Type exposes the type and APIVersion of versioned or internal API objects.
type Type interface {
GetAPIVersion() string
SetAPIVersion(version string)
GetKind() string
SetKind(kind string)
}
type Type unversioned.Type
// MetadataAccessor lets you work with object and list metadata from any of the versioned or
// internal API objects. Attempting to set or retrieve a field on an object that does


@ -35,6 +35,11 @@ func ListAccessor(obj interface{}) (List, error) {
return om, nil
}
}
if listMetaAccessor, ok := obj.(unversioned.ListMetaAccessor); ok {
if om := listMetaAccessor.GetListMeta(); om != nil {
return om, nil
}
}
// we may get passed an object that is directly portable to List
if list, ok := obj.(List); ok {
return list, nil


@ -17,7 +17,6 @@ limitations under the License.
package api
import (
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/serializer"
@ -114,70 +113,3 @@ func AddToScheme(scheme *runtime.Scheme) {
&unversioned.APIResourceList{},
)
}
func (obj *Pod) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Pod) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodStatusResult) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *PodStatusResult) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodTemplate) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *PodTemplate) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodTemplateList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicationController) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *ReplicationController) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicationControllerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Service) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Service) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Endpoints) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Endpoints) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *EndpointsList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Node) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Node) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NodeList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NodeProxyOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Binding) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Binding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Event) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Event) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *EventList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *List) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ListOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *LimitRange) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *LimitRange) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *LimitRangeList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ResourceQuota) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *ResourceQuota) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ResourceQuotaList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Namespace) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Namespace) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NamespaceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceAccount) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *ServiceAccount) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceAccountList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Secret) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *Secret) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *SecretList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolume) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *PersistentVolume) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolumeList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolumeClaim) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *PersistentVolumeClaim) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolumeClaimList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeleteOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodAttachOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodExecOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodProxyOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceProxyOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ComponentStatus) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *ComponentStatus) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ComponentStatusList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *SerializedReference) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RangeAllocation) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *RangeAllocation) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ObjectReference) GetObjectKind() unversioned.ObjectKind { return obj }
func (obj *ExportOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ConfigMap) GetObjectMeta() meta.Object { return &obj.ObjectMeta }
func (obj *ConfigMap) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ConfigMapList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }


@ -52,7 +52,7 @@ var (
zeroBytes = []byte("0")
)
// int64Amount represents a fixed precision numerator and arbitary scale exponent. It is faster
// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
// than operations on inf.Dec for values that can be represented as int64.
type int64Amount struct {
value int64


@ -36525,7 +36525,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [9]bool
var yyq2 [10]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = len(x.Capacity) != 0
@ -36537,9 +36537,10 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
yyq2[6] = true
yyq2[7] = len(x.Images) != 0
yyq2[8] = len(x.VolumesInUse) != 0
yyq2[9] = len(x.VolumesAttached) != 0
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(9)
r.EncodeArrayStart(10)
} else {
yynn2 = 0
for _, b := range yyq2 {
@ -36754,7 +36755,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
_ = yym32
if false {
} else {
h.encSliceUniqueDeviceName(([]UniqueDeviceName)(x.VolumesInUse), e)
h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
}
}
} else {
@ -36772,7 +36773,40 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
_ = yym33
if false {
} else {
h.encSliceUniqueDeviceName(([]UniqueDeviceName)(x.VolumesInUse), e)
h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
}
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[9] {
if x.VolumesAttached == nil {
r.EncodeNil()
} else {
yym35 := z.EncBinary()
_ = yym35
if false {
} else {
h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
}
}
} else {
r.EncodeNil()
}
} else {
if yyq2[9] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("volumesAttached"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.VolumesAttached == nil {
r.EncodeNil()
} else {
yym36 := z.EncBinary()
_ = yym36
if false {
} else {
h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
}
}
}
@ -36917,7 +36951,19 @@ func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
_ = yym16
if false {
} else {
h.decSliceUniqueDeviceName((*[]UniqueDeviceName)(yyv15), d)
h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d)
}
}
case "volumesAttached":
if r.TryDecodeAsNil() {
x.VolumesAttached = nil
} else {
yyv17 := &x.VolumesAttached
yym18 := z.DecBinary()
_ = yym18
if false {
} else {
h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d)
}
}
default:
@ -36931,16 +36977,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj17 int
var yyb17 bool
var yyhl17 bool = l >= 0
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
var yyj19 int
var yyb19 bool
var yyhl19 bool = l >= 0
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36948,16 +36994,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Capacity = nil
} else {
yyv18 := &x.Capacity
yyv18.CodecDecodeSelf(d)
yyv20 := &x.Capacity
yyv20.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36965,16 +37011,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Allocatable = nil
} else {
yyv19 := &x.Allocatable
yyv19.CodecDecodeSelf(d)
yyv21 := &x.Allocatable
yyv21.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36984,13 +37030,13 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
} else {
x.Phase = NodePhase(r.DecodeString())
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36998,21 +37044,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Conditions = nil
} else {
yyv21 := &x.Conditions
yym22 := z.DecBinary()
_ = yym22
yyv23 := &x.Conditions
yym24 := z.DecBinary()
_ = yym24
if false {
} else {
h.decSliceNodeCondition((*[]NodeCondition)(yyv21), d)
h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d)
}
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -37020,21 +37066,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Addresses = nil
} else {
yyv23 := &x.Addresses
yym24 := z.DecBinary()
_ = yym24
yyv25 := &x.Addresses
yym26 := z.DecBinary()
_ = yym26
if false {
} else {
h.decSliceNodeAddress((*[]NodeAddress)(yyv23), d)
h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d)
}
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -37042,16 +37088,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.DaemonEndpoints = NodeDaemonEndpoints{}
} else {
yyv25 := &x.DaemonEndpoints
yyv25.CodecDecodeSelf(d)
yyv27 := &x.DaemonEndpoints
yyv27.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -37059,16 +37105,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.NodeInfo = NodeSystemInfo{}
} else {
yyv26 := &x.NodeInfo
yyv26.CodecDecodeSelf(d)
yyv28 := &x.NodeInfo
yyv28.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -37076,21 +37122,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Images = nil
} else {
yyv27 := &x.Images
yym28 := z.DecBinary()
_ = yym28
yyv29 := &x.Images
yym30 := z.DecBinary()
_ = yym30
if false {
} else {
h.decSliceContainerImage((*[]ContainerImage)(yyv27), d)
h.decSliceContainerImage((*[]ContainerImage)(yyv29), d)
}
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -37098,31 +37144,53 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.VolumesInUse = nil
} else {
yyv29 := &x.VolumesInUse
yym30 := z.DecBinary()
_ = yym30
yyv31 := &x.VolumesInUse
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
h.decSliceUniqueDeviceName((*[]UniqueDeviceName)(yyv29), d)
h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d)
}
}
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb19 = r.CheckBreak()
}
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.VolumesAttached = nil
} else {
yyv33 := &x.VolumesAttached
yym34 := z.DecBinary()
_ = yym34
if false {
} else {
h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d)
}
}
for {
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj17-1, "")
z.DecStructFieldNotFound(yyj19-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x UniqueDeviceName) CodecEncodeSelf(e *codec1978.Encoder) {
func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
@ -37135,7 +37203,7 @@ func (x UniqueDeviceName) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
func (x *UniqueDeviceName) CodecDecodeSelf(d *codec1978.Decoder) {
func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
@ -37148,6 +37216,199 @@ func (x *UniqueDeviceName) CodecDecodeSelf(d *codec1978.Decoder) {
}
}
func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [2]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(2)
} else {
yynn2 = 2
for _, b := range yyq2 {
if b {
yynn2++
}
}
r.EncodeMapStart(yynn2)
yynn2 = 0
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
x.Name.CodecEncodeSelf(e)
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("name"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
x.Name.CodecEncodeSelf(e)
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("devicePath"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym1 := z.DecBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct2 := r.ContainerType()
if yyct2 == codecSelferValueTypeMap1234 {
yyl2 := r.ReadMapStart()
if yyl2 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl2, d)
}
} else if yyct2 == codecSelferValueTypeArray1234 {
yyl2 := r.ReadArrayStart()
if yyl2 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl2, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys3Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys3Slc
var yyhl3 bool = l >= 0
for yyj3 := 0; ; yyj3++ {
if yyhl3 {
if yyj3 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
yys3 := string(yys3Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys3 {
case "name":
if r.TryDecodeAsNil() {
x.Name = ""
} else {
x.Name = UniqueVolumeName(r.DecodeString())
}
case "devicePath":
if r.TryDecodeAsNil() {
x.DevicePath = ""
} else {
x.DevicePath = string(r.DecodeString())
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
} // end for yyj3
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj6 int
var yyb6 bool
var yyhl6 bool = l >= 0
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
} else {
yyb6 = r.CheckBreak()
}
if yyb6 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Name = ""
} else {
x.Name = UniqueVolumeName(r.DecodeString())
}
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
} else {
yyb6 = r.CheckBreak()
}
if yyb6 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.DevicePath = ""
} else {
x.DevicePath = string(r.DecodeString())
}
for {
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
} else {
yyb6 = r.CheckBreak()
}
if yyb6 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj6-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@ -57367,7 +57628,7 @@ func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec197
}
}
func (x codecSelfer1234) encSliceUniqueDeviceName(v []UniqueDeviceName, e *codec1978.Encoder) {
func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
@ -57379,7 +57640,7 @@ func (x codecSelfer1234) encSliceUniqueDeviceName(v []UniqueDeviceName, e *codec
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *codec1978.Decoder) {
func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
@ -57390,7 +57651,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
_ = yyc1
if yyl1 == 0 {
if yyv1 == nil {
yyv1 = []UniqueDeviceName{}
yyv1 = []UniqueVolumeName{}
yyc1 = true
} else if len(yyv1) != 0 {
yyv1 = yyv1[:0]
@ -57408,10 +57669,10 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]UniqueDeviceName, yyrl1)
yyv1 = make([]UniqueVolumeName, yyrl1)
}
} else {
yyv1 = make([]UniqueDeviceName, yyrl1)
yyv1 = make([]UniqueVolumeName, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
@ -57425,7 +57686,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = UniqueDeviceName(r.DecodeString())
yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
}
}
@ -57436,7 +57697,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = UniqueDeviceName(r.DecodeString())
yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
}
}
@ -57447,7 +57708,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
for ; !r.CheckBreak(); yyj1++ {
if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, "") // var yyz1 UniqueDeviceName
yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
@ -57455,7 +57716,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = UniqueDeviceName(r.DecodeString())
yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
}
} else {
@ -57467,7 +57728,126 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []UniqueDeviceName{}
yyv1 = []UniqueVolumeName{}
yyc1 = true
}
}
yyh1.End()
if yyc1 {
*v = yyv1
}
}
func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
r.EncodeArrayStart(len(v))
for _, yyv1 := range v {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yy2 := &yyv1
yy2.CodecEncodeSelf(e)
}
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yyv1 := *v
yyh1, yyl1 := z.DecSliceHelperStart()
var yyc1 bool
_ = yyc1
if yyl1 == 0 {
if yyv1 == nil {
yyv1 = []AttachedVolume{}
yyc1 = true
} else if len(yyv1) != 0 {
yyv1 = yyv1[:0]
yyc1 = true
}
} else if yyl1 > 0 {
var yyrr1, yyrl1 int
var yyrt1 bool
_, _ = yyrl1, yyrt1
yyrr1 = yyl1 // len(yyv1)
if yyl1 > cap(yyv1) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]AttachedVolume, yyrl1)
}
} else {
yyv1 = make([]AttachedVolume, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
if yyrg1 {
copy(yyv1, yyv21)
}
} else if yyl1 != len(yyv1) {
yyv1 = yyv1[:yyl1]
yyc1 = true
}
yyj1 := 0
for ; yyj1 < yyrr1; yyj1++ {
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = AttachedVolume{}
} else {
yyv2 := &yyv1[yyj1]
yyv2.CodecDecodeSelf(d)
}
}
if yyrt1 {
for ; yyj1 < yyl1; yyj1++ {
yyv1 = append(yyv1, AttachedVolume{})
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = AttachedVolume{}
} else {
yyv3 := &yyv1[yyj1]
yyv3.CodecDecodeSelf(d)
}
}
}
} else {
yyj1 := 0
for ; !r.CheckBreak(); yyj1++ {
if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
if yyj1 < len(yyv1) {
if r.TryDecodeAsNil() {
yyv1[yyj1] = AttachedVolume{}
} else {
yyv4 := &yyv1[yyj1]
yyv4.CodecDecodeSelf(d)
}
} else {
z.DecSwallow()
}
}
if yyj1 < len(yyv1) {
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []AttachedVolume{}
yyc1 = true
}
}
@ -57630,7 +58010,7 @@ func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 592)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 616)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]


@ -306,6 +306,8 @@ type PersistentVolumeSpec struct {
// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
// ClaimRef is expected to be non-nil when bound.
// claim.VolumeName is the authoritative bind between PV and PVC.
// When set to non-nil value, PVC.Spec.Selector of the referenced PVC is
// ignored, i.e. labels of this PV do not need to match PVC selector.
ClaimRef *ObjectReference `json:"claimRef,omitempty"`
// Optional: what happens to a persistent volume when released from its claim.
PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"`
@ -366,11 +368,13 @@ type PersistentVolumeClaimList struct {
type PersistentVolumeClaimSpec struct {
// Contains the types of access modes required
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
// A label query over volumes to consider for binding
// A label query over volumes to consider for binding. This selector is
// ignored when VolumeName is set
Selector *unversioned.LabelSelector `json:"selector,omitempty"`
// Resources represents the minimum resources required
Resources ResourceRequirements `json:"resources,omitempty"`
// VolumeName is the binding reference to the PersistentVolume backing this claim
// VolumeName is the binding reference to the PersistentVolume backing this
// claim. When set to non-empty value Selector is not evaluated
VolumeName string `json:"volumeName,omitempty"`
}
@ -1983,11 +1987,22 @@ type NodeStatus struct {
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
// List of container images on this node
Images []ContainerImage `json:"images,omitempty"`
// List of attachable volume devices in use (mounted) by the node.
VolumesInUse []UniqueDeviceName `json:"volumesInUse,omitempty"`
// List of attachable volumes in use (mounted) by the node.
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty"`
// List of volumes that are attached to the node.
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty"`
}
type UniqueDeviceName string
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node
type AttachedVolume struct {
// Name of the attached volume
Name UniqueVolumeName `json:"name"`
// DevicePath represents the device path where the volume should be available
DevicePath string `json:"devicePath"`
}
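
VolumesAttached complements VolumesInUse: the attach/detach controller reports what is attached (and at which device path), while the kubelet reports what it has in use. A hedged sketch of a populated status; the unique volume name format is illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	status := api.NodeStatus{
		// Reported as in use (mounted) by the kubelet.
		VolumesInUse: []api.UniqueVolumeName{
			"kubernetes.io/aws-ebs/vol-0123", // hypothetical unique name
		},
		// Reported as attached, with the device path the kubelet should use.
		VolumesAttached: []api.AttachedVolume{
			{Name: "kubernetes.io/aws-ebs/vol-0123", DevicePath: "/dev/xvdf"},
		},
	}
	fmt.Printf("%+v\n", status)
}
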
// Describe a container image
type ContainerImage struct {

vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go (generated, vendored, new file, 62 lines)

@ -0,0 +1,62 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unversioned
// ListMetaAccessor retrieves the list interface from an object
// TODO: move this, and TypeMeta and ListMeta, to a different package
type ListMetaAccessor interface {
GetListMeta() List
}
// List lets you work with list metadata from any of the versioned or
// internal API objects. Attempting to set or retrieve a field on an object that does
// not support that field will be a no-op and return a default value.
// TODO: move this, and TypeMeta and ListMeta, to a different package
type List interface {
GetResourceVersion() string
SetResourceVersion(version string)
GetSelfLink() string
SetSelfLink(selfLink string)
}
// Type exposes the type and APIVersion of versioned or internal API objects.
// TODO: move this, and TypeMeta and ListMeta, to a different package
type Type interface {
GetAPIVersion() string
SetAPIVersion(version string)
GetKind() string
SetKind(kind string)
}
func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion }
func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink }
func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
func (obj *TypeMeta) GetObjectKind() ObjectKind { return obj }
// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
func (obj *TypeMeta) SetGroupVersionKind(gvk GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
func (obj *TypeMeta) GroupVersionKind() GroupVersionKind {
return FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}
func (obj *ListMeta) GetListMeta() List { return obj }
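
With these helpers, any type embedding TypeMeta satisfies ObjectKind for free; setting a GroupVersionKind just rewrites the apiVersion/kind pair. A short round-trip sketch, assuming the GroupVersionKind helpers from the same package:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	var tm unversioned.TypeMeta
	tm.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{
		Group: "", Version: "v1", Kind: "Pod",
	})
	fmt.Println(tm.APIVersion, tm.Kind) // v1 Pod
	fmt.Println(tm.GroupVersionKind())  // round-trips back to the same GVK
}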


@ -23,20 +23,3 @@ var SchemeGroupVersion = GroupVersion{Group: "", Version: ""}
func Kind(kind string) GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
func (obj *TypeMeta) SetGroupVersionKind(gvk GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
func (obj *TypeMeta) GroupVersionKind() GroupVersionKind {
return FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}
func (obj *Status) GetObjectKind() ObjectKind { return &obj.TypeMeta }
func (obj *APIVersions) GetObjectKind() ObjectKind { return &obj.TypeMeta }
func (obj *APIGroupList) GetObjectKind() ObjectKind { return &obj.TypeMeta }
func (obj *APIGroup) GetObjectKind() ObjectKind { return &obj.TypeMeta }
func (obj *APIResourceList) GetObjectKind() ObjectKind { return &obj.TypeMeta }
func (obj *ExportOptions) GetObjectKind() ObjectKind { return &obj.TypeMeta }


@ -33,6 +33,8 @@ func init() {
Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
Convert_v1_Affinity_To_api_Affinity,
Convert_api_Affinity_To_v1_Affinity,
Convert_v1_AttachedVolume_To_api_AttachedVolume,
Convert_api_AttachedVolume_To_v1_AttachedVolume,
Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource,
Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource,
Convert_v1_Binding_To_api_Binding,
@ -425,6 +427,26 @@ func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conv
return autoConvert_api_Affinity_To_v1_Affinity(in, out, s)
}
func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error {
out.Name = api.UniqueVolumeName(in.Name)
out.DevicePath = in.DevicePath
return nil
}
func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error {
return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s)
}
func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error {
out.Name = UniqueVolumeName(in.Name)
out.DevicePath = in.DevicePath
return nil
}
func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error {
return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s)
}
func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
@ -3390,13 +3412,24 @@ func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeSt
}
if in.VolumesInUse != nil {
in, out := &in.VolumesInUse, &out.VolumesInUse
*out = make([]api.UniqueDeviceName, len(*in))
*out = make([]api.UniqueVolumeName, len(*in))
for i := range *in {
(*out)[i] = api.UniqueDeviceName((*in)[i])
(*out)[i] = api.UniqueVolumeName((*in)[i])
}
} else {
out.VolumesInUse = nil
}
if in.VolumesAttached != nil {
in, out := &in.VolumesAttached, &out.VolumesAttached
*out = make([]api.AttachedVolume, len(*in))
for i := range *in {
if err := Convert_v1_AttachedVolume_To_api_AttachedVolume(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.VolumesAttached = nil
}
return nil
}
@ -3473,13 +3506,24 @@ func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeSt
}
if in.VolumesInUse != nil {
in, out := &in.VolumesInUse, &out.VolumesInUse
*out = make([]UniqueDeviceName, len(*in))
*out = make([]UniqueVolumeName, len(*in))
for i := range *in {
(*out)[i] = UniqueDeviceName((*in)[i])
(*out)[i] = UniqueVolumeName((*in)[i])
}
} else {
out.VolumesInUse = nil
}
if in.VolumesAttached != nil {
in, out := &in.VolumesAttached, &out.VolumesAttached
*out = make([]AttachedVolume, len(*in))
for i := range *in {
if err := Convert_api_AttachedVolume_To_v1_AttachedVolume(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.VolumesAttached = nil
}
return nil
}

View File

@ -34,6 +34,7 @@ func init() {
if err := api.Scheme.AddGeneratedDeepCopyFuncs(
DeepCopy_v1_AWSElasticBlockStoreVolumeSource,
DeepCopy_v1_Affinity,
DeepCopy_v1_AttachedVolume,
DeepCopy_v1_AzureFileVolumeSource,
DeepCopy_v1_Binding,
DeepCopy_v1_Capabilities,
@ -225,6 +226,12 @@ func DeepCopy_v1_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) erro
return nil
}
func DeepCopy_v1_AttachedVolume(in AttachedVolume, out *AttachedVolume, c *conversion.Cloner) error {
out.Name = in.Name
out.DevicePath = in.DevicePath
return nil
}
func DeepCopy_v1_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
@ -1550,13 +1557,24 @@ func DeepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner
}
if in.VolumesInUse != nil {
in, out := in.VolumesInUse, &out.VolumesInUse
*out = make([]UniqueDeviceName, len(in))
*out = make([]UniqueVolumeName, len(in))
for i := range in {
(*out)[i] = in[i]
}
} else {
out.VolumesInUse = nil
}
if in.VolumesAttached != nil {
in, out := in.VolumesAttached, &out.VolumesAttached
*out = make([]AttachedVolume, len(in))
for i := range in {
if err := DeepCopy_v1_AttachedVolume(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.VolumesAttached = nil
}
return nil
}

View File

@ -27,6 +27,7 @@ limitations under the License.
It has these top-level messages:
AWSElasticBlockStoreVolumeSource
Affinity
AttachedVolume
AzureFileVolumeSource
Binding
Capabilities
@ -201,6 +202,10 @@ func (m *Affinity) Reset() { *m = Affinity{} }
func (m *Affinity) String() string { return proto.CompactTextString(m) }
func (*Affinity) ProtoMessage() {}
func (m *AttachedVolume) Reset() { *m = AttachedVolume{} }
func (m *AttachedVolume) String() string { return proto.CompactTextString(m) }
func (*AttachedVolume) ProtoMessage() {}
func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} }
func (m *AzureFileVolumeSource) String() string { return proto.CompactTextString(m) }
func (*AzureFileVolumeSource) ProtoMessage() {}
@ -788,6 +793,7 @@ func (*WeightedPodAffinityTerm) ProtoMessage() {}
func init() {
proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource")
proto.RegisterType((*Affinity)(nil), "k8s.io.kubernetes.pkg.api.v1.Affinity")
proto.RegisterType((*AttachedVolume)(nil), "k8s.io.kubernetes.pkg.api.v1.AttachedVolume")
proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AzureFileVolumeSource")
proto.RegisterType((*Binding)(nil), "k8s.io.kubernetes.pkg.api.v1.Binding")
proto.RegisterType((*Capabilities)(nil), "k8s.io.kubernetes.pkg.api.v1.Capabilities")
@ -1020,6 +1026,32 @@ func (m *Affinity) MarshalTo(data []byte) (int, error) {
return i, nil
}
func (m *AttachedVolume) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *AttachedVolume) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0xa
i++
i = encodeVarintGenerated(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
data[i] = 0x12
i++
i = encodeVarintGenerated(data, i, uint64(len(m.DevicePath)))
i += copy(data[i:], m.DevicePath)
return i, nil
}
func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
@ -4174,6 +4206,18 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) {
i += copy(data[i:], s)
}
}
if len(m.VolumesAttached) > 0 {
for _, msg := range m.VolumesAttached {
data[i] = 0x52
i++
i = encodeVarintGenerated(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
@ -7735,6 +7779,16 @@ func (m *Affinity) Size() (n int) {
return n
}
func (m *AttachedVolume) Size() (n int) {
var l int
_ = l
l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.DevicePath)
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *AzureFileVolumeSource) Size() (n int) {
var l int
_ = l
@ -8887,6 +8941,12 @@ func (m *NodeStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
if len(m.VolumesAttached) > 0 {
for _, e := range m.VolumesAttached {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
@ -10492,6 +10552,114 @@ func (m *Affinity) Unmarshal(data []byte) error {
}
return nil
}
func (m *AttachedVolume) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = UniqueVolumeName(data[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DevicePath = string(data[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *AzureFileVolumeSource) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
@ -21633,7 +21801,38 @@ func (m *NodeStatus) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.VolumesInUse = append(m.VolumesInUse, UniqueDeviceName(data[iNdEx:postIndex]))
m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(data[iNdEx:postIndex]))
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{})
if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex

View File

@ -71,6 +71,15 @@ message Affinity {
optional PodAntiAffinity podAntiAffinity = 3;
}
// AttachedVolume describes a volume attached to a node
message AttachedVolume {
// Name of the attached volume
optional string name = 1;
// DevicePath represents the device path where the volume should be available
optional string devicePath = 2;
}
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
message AzureFileVolumeSource {
// the name of secret that contains Azure Storage Account Name and Key
@ -1304,8 +1313,11 @@ message NodeStatus {
// List of container images on this node
repeated ContainerImage images = 8;
// List of volumes in use (mounted) by the node.
// List of attachable volumes in use (mounted) by the node.
repeated string volumesInUse = 9;
// List of volumes that are attached to the node.
repeated AttachedVolume volumesAttached = 10;
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
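The new volumesAttached field complements volumesInUse: the attach/detach controller records what is attached to the node, while the kubelet reports what is actually mounted. A minimal sketch of the shape of this data, using only the types introduced in this diff (the volume name and device path values are illustrative; real names are produced by volume plugins):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	status := v1.NodeStatus{
		// Mounted and in use by pods on the node.
		VolumesInUse: []v1.UniqueVolumeName{"kubernetes.io/aws-ebs/vol-0a1b2c3d"},
		// Attached to the node, with the device path reported back.
		VolumesAttached: []v1.AttachedVolume{
			{Name: "kubernetes.io/aws-ebs/vol-0a1b2c3d", DevicePath: "/dev/xvdba"},
		},
	}
	for _, av := range status.VolumesAttached {
		fmt.Printf("volume %s attached at %s\n", av.Name, av.DevicePath)
	}
}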

85
vendor/k8s.io/kubernetes/pkg/api/v1/meta.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/types"
)
func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj }
// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows
// fast, direct access to metadata fields for API objects.
func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace }
func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace }
func (meta *ObjectMeta) GetName() string { return meta.Name }
func (meta *ObjectMeta) SetName(name string) { meta.Name = name }
func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName }
func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
func (meta *ObjectMeta) GetUID() types.UID { return meta.UID }
func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid }
func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion }
func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink }
func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp }
func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) {
meta.CreationTimestamp = creationTimestamp
}
func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp }
func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) {
meta.DeletionTimestamp = deletionTimestamp
}
func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels }
func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }
func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers }
func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers }
func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference {
ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences))
for i := 0; i < len(meta.OwnerReferences); i++ {
ret[i].Kind = meta.OwnerReferences[i].Kind
ret[i].Name = meta.OwnerReferences[i].Name
ret[i].UID = meta.OwnerReferences[i].UID
ret[i].APIVersion = meta.OwnerReferences[i].APIVersion
if meta.OwnerReferences[i].Controller != nil {
value := *meta.OwnerReferences[i].Controller
ret[i].Controller = &value
}
}
return ret
}
func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) {
newReferences := make([]OwnerReference, len(references))
for i := 0; i < len(references); i++ {
newReferences[i].Kind = references[i].Kind
newReferences[i].Name = references[i].Name
newReferences[i].UID = references[i].UID
newReferences[i].APIVersion = references[i].APIVersion
if references[i].Controller != nil {
value := *references[i].Controller
newReferences[i].Controller = &value
}
}
meta.OwnerReferences = newReferences
}
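These accessors make *ObjectMeta satisfy meta.Object, so generic tooling can read and mutate metadata without knowing the concrete API type. A minimal sketch, assuming only the accessors defined in this file (the name, namespace, and label values are illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	om := &v1.ObjectMeta{Name: "demo", Namespace: "default"}
	// GetObjectMeta returns the meta.Object view defined above.
	obj := om.GetObjectMeta()
	obj.SetLabels(map[string]string{"app": "demo"})
	fmt.Println(obj.GetNamespace(), obj.GetName(), obj.GetLabels())
}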

View File

@ -92,50 +92,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
// Add the watch version that applies
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *Pod) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodStatusResult) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodTemplate) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodTemplateList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicationController) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicationControllerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Service) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Endpoints) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *EndpointsList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Node) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NodeList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NodeProxyOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Binding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Event) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *EventList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *List) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *LimitRange) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *LimitRangeList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ResourceQuota) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ResourceQuotaList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Namespace) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NamespaceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Secret) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *SecretList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceAccount) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceAccountList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolume) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolumeList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolumeClaim) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PersistentVolumeClaimList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeleteOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ListOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodAttachOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodExecOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodProxyOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ServiceProxyOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ComponentStatus) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ComponentStatusList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *SerializedReference) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RangeAllocation) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ExportOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ConfigMap) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ConfigMapList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -36330,7 +36330,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [9]bool
var yyq2 [10]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[0] = len(x.Capacity) != 0
@ -36342,9 +36342,10 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
yyq2[6] = true
yyq2[7] = len(x.Images) != 0
yyq2[8] = len(x.VolumesInUse) != 0
yyq2[9] = len(x.VolumesAttached) != 0
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(9)
r.EncodeArrayStart(10)
} else {
yynn2 = 0
for _, b := range yyq2 {
@ -36559,7 +36560,7 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
_ = yym32
if false {
} else {
h.encSliceUniqueDeviceName(([]UniqueDeviceName)(x.VolumesInUse), e)
h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
}
}
} else {
@ -36577,7 +36578,40 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) {
_ = yym33
if false {
} else {
h.encSliceUniqueDeviceName(([]UniqueDeviceName)(x.VolumesInUse), e)
h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e)
}
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[9] {
if x.VolumesAttached == nil {
r.EncodeNil()
} else {
yym35 := z.EncBinary()
_ = yym35
if false {
} else {
h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
}
}
} else {
r.EncodeNil()
}
} else {
if yyq2[9] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("volumesAttached"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.VolumesAttached == nil {
r.EncodeNil()
} else {
yym36 := z.EncBinary()
_ = yym36
if false {
} else {
h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e)
}
}
}
@ -36722,7 +36756,19 @@ func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
_ = yym16
if false {
} else {
h.decSliceUniqueDeviceName((*[]UniqueDeviceName)(yyv15), d)
h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d)
}
}
case "volumesAttached":
if r.TryDecodeAsNil() {
x.VolumesAttached = nil
} else {
yyv17 := &x.VolumesAttached
yym18 := z.DecBinary()
_ = yym18
if false {
} else {
h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d)
}
}
default:
@ -36736,16 +36782,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj17 int
var yyb17 bool
var yyhl17 bool = l >= 0
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
var yyj19 int
var yyb19 bool
var yyhl19 bool = l >= 0
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36753,16 +36799,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Capacity = nil
} else {
yyv18 := &x.Capacity
yyv18.CodecDecodeSelf(d)
yyv20 := &x.Capacity
yyv20.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36770,16 +36816,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Allocatable = nil
} else {
yyv19 := &x.Allocatable
yyv19.CodecDecodeSelf(d)
yyv21 := &x.Allocatable
yyv21.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36789,13 +36835,13 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
} else {
x.Phase = NodePhase(r.DecodeString())
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36803,21 +36849,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Conditions = nil
} else {
yyv21 := &x.Conditions
yym22 := z.DecBinary()
_ = yym22
yyv23 := &x.Conditions
yym24 := z.DecBinary()
_ = yym24
if false {
} else {
h.decSliceNodeCondition((*[]NodeCondition)(yyv21), d)
h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d)
}
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36825,21 +36871,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Addresses = nil
} else {
yyv23 := &x.Addresses
yym24 := z.DecBinary()
_ = yym24
yyv25 := &x.Addresses
yym26 := z.DecBinary()
_ = yym26
if false {
} else {
h.decSliceNodeAddress((*[]NodeAddress)(yyv23), d)
h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d)
}
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36847,16 +36893,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.DaemonEndpoints = NodeDaemonEndpoints{}
} else {
yyv25 := &x.DaemonEndpoints
yyv25.CodecDecodeSelf(d)
yyv27 := &x.DaemonEndpoints
yyv27.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36864,16 +36910,16 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.NodeInfo = NodeSystemInfo{}
} else {
yyv26 := &x.NodeInfo
yyv26.CodecDecodeSelf(d)
yyv28 := &x.NodeInfo
yyv28.CodecDecodeSelf(d)
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36881,21 +36927,21 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Images = nil
} else {
yyv27 := &x.Images
yym28 := z.DecBinary()
_ = yym28
yyv29 := &x.Images
yym30 := z.DecBinary()
_ = yym30
if false {
} else {
h.decSliceContainerImage((*[]ContainerImage)(yyv27), d)
h.decSliceContainerImage((*[]ContainerImage)(yyv29), d)
}
}
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -36903,31 +36949,53 @@ func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.VolumesInUse = nil
} else {
yyv29 := &x.VolumesInUse
yym30 := z.DecBinary()
_ = yym30
yyv31 := &x.VolumesInUse
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
h.decSliceUniqueDeviceName((*[]UniqueDeviceName)(yyv29), d)
h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d)
}
}
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb19 = r.CheckBreak()
}
if yyb19 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.VolumesAttached = nil
} else {
yyv33 := &x.VolumesAttached
yym34 := z.DecBinary()
_ = yym34
if false {
} else {
h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d)
}
}
for {
yyj17++
if yyhl17 {
yyb17 = yyj17 > l
yyj19++
if yyhl19 {
yyb19 = yyj19 > l
} else {
yyb17 = r.CheckBreak()
yyb19 = r.CheckBreak()
}
if yyb17 {
if yyb19 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj17-1, "")
z.DecStructFieldNotFound(yyj19-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x UniqueDeviceName) CodecEncodeSelf(e *codec1978.Encoder) {
func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
@ -36940,7 +37008,7 @@ func (x UniqueDeviceName) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
func (x *UniqueDeviceName) CodecDecodeSelf(d *codec1978.Decoder) {
func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
@ -36953,6 +37021,199 @@ func (x *UniqueDeviceName) CodecDecodeSelf(d *codec1978.Decoder) {
}
}
func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
if x == nil {
r.EncodeNil()
} else {
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [2]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(2)
} else {
yynn2 = 2
for _, b := range yyq2 {
if b {
yynn2++
}
}
r.EncodeMapStart(yynn2)
yynn2 = 0
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
x.Name.CodecEncodeSelf(e)
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("name"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
x.Name.CodecEncodeSelf(e)
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym7 := z.EncBinary()
_ = yym7
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("devicePath"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym8 := z.EncBinary()
_ = yym8
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
z.EncSendContainerState(codecSelfer_containerMapEnd1234)
}
}
}
}
func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym1 := z.DecBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
yyct2 := r.ContainerType()
if yyct2 == codecSelferValueTypeMap1234 {
yyl2 := r.ReadMapStart()
if yyl2 == 0 {
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
} else {
x.codecDecodeSelfFromMap(yyl2, d)
}
} else if yyct2 == codecSelferValueTypeArray1234 {
yyl2 := r.ReadArrayStart()
if yyl2 == 0 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
x.codecDecodeSelfFromArray(yyl2, d)
}
} else {
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
}
}
}
func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yys3Slc = z.DecScratchBuffer() // default slice to decode into
_ = yys3Slc
var yyhl3 bool = l >= 0
for yyj3 := 0; ; yyj3++ {
if yyhl3 {
if yyj3 >= l {
break
}
} else {
if r.CheckBreak() {
break
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1234)
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
yys3 := string(yys3Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1234)
switch yys3 {
case "name":
if r.TryDecodeAsNil() {
x.Name = ""
} else {
x.Name = UniqueVolumeName(r.DecodeString())
}
case "devicePath":
if r.TryDecodeAsNil() {
x.DevicePath = ""
} else {
x.DevicePath = string(r.DecodeString())
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
} // end for yyj3
z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}
func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj6 int
var yyb6 bool
var yyhl6 bool = l >= 0
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
} else {
yyb6 = r.CheckBreak()
}
if yyb6 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.Name = ""
} else {
x.Name = UniqueVolumeName(r.DecodeString())
}
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
} else {
yyb6 = r.CheckBreak()
}
if yyb6 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.DevicePath = ""
} else {
x.DevicePath = string(r.DecodeString())
}
for {
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
} else {
yyb6 = r.CheckBreak()
}
if yyb6 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj6-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@ -57420,7 +57681,7 @@ func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec197
}
}
func (x codecSelfer1234) encSliceUniqueDeviceName(v []UniqueDeviceName, e *codec1978.Encoder) {
func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
@ -57432,7 +57693,7 @@ func (x codecSelfer1234) encSliceUniqueDeviceName(v []UniqueDeviceName, e *codec
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *codec1978.Decoder) {
func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
@ -57443,7 +57704,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
_ = yyc1
if yyl1 == 0 {
if yyv1 == nil {
yyv1 = []UniqueDeviceName{}
yyv1 = []UniqueVolumeName{}
yyc1 = true
} else if len(yyv1) != 0 {
yyv1 = yyv1[:0]
@ -57461,10 +57722,10 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]UniqueDeviceName, yyrl1)
yyv1 = make([]UniqueVolumeName, yyrl1)
}
} else {
yyv1 = make([]UniqueDeviceName, yyrl1)
yyv1 = make([]UniqueVolumeName, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
@ -57478,7 +57739,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = UniqueDeviceName(r.DecodeString())
yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
}
}
@ -57489,7 +57750,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = UniqueDeviceName(r.DecodeString())
yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
}
}
@ -57500,7 +57761,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
for ; !r.CheckBreak(); yyj1++ {
if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, "") // var yyz1 UniqueDeviceName
yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
@ -57508,7 +57769,7 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
if r.TryDecodeAsNil() {
yyv1[yyj1] = ""
} else {
yyv1[yyj1] = UniqueDeviceName(r.DecodeString())
yyv1[yyj1] = UniqueVolumeName(r.DecodeString())
}
} else {
@ -57520,7 +57781,126 @@ func (x codecSelfer1234) decSliceUniqueDeviceName(v *[]UniqueDeviceName, d *code
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []UniqueDeviceName{}
yyv1 = []UniqueVolumeName{}
yyc1 = true
}
}
yyh1.End()
if yyc1 {
*v = yyv1
}
}
func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
r.EncodeArrayStart(len(v))
for _, yyv1 := range v {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yy2 := &yyv1
yy2.CodecEncodeSelf(e)
}
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}
func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yyv1 := *v
yyh1, yyl1 := z.DecSliceHelperStart()
var yyc1 bool
_ = yyc1
if yyl1 == 0 {
if yyv1 == nil {
yyv1 = []AttachedVolume{}
yyc1 = true
} else if len(yyv1) != 0 {
yyv1 = yyv1[:0]
yyc1 = true
}
} else if yyl1 > 0 {
var yyrr1, yyrl1 int
var yyrt1 bool
_, _ = yyrl1, yyrt1
yyrr1 = yyl1 // len(yyv1)
if yyl1 > cap(yyv1) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]AttachedVolume, yyrl1)
}
} else {
yyv1 = make([]AttachedVolume, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
if yyrg1 {
copy(yyv1, yyv21)
}
} else if yyl1 != len(yyv1) {
yyv1 = yyv1[:yyl1]
yyc1 = true
}
yyj1 := 0
for ; yyj1 < yyrr1; yyj1++ {
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = AttachedVolume{}
} else {
yyv2 := &yyv1[yyj1]
yyv2.CodecDecodeSelf(d)
}
}
if yyrt1 {
for ; yyj1 < yyl1; yyj1++ {
yyv1 = append(yyv1, AttachedVolume{})
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
yyv1[yyj1] = AttachedVolume{}
} else {
yyv3 := &yyv1[yyj1]
yyv3.CodecDecodeSelf(d)
}
}
}
} else {
yyj1 := 0
for ; !r.CheckBreak(); yyj1++ {
if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
if yyj1 < len(yyv1) {
if r.TryDecodeAsNil() {
yyv1[yyj1] = AttachedVolume{}
} else {
yyv4 := &yyv1[yyj1]
yyv4.CodecDecodeSelf(d)
}
} else {
z.DecSwallow()
}
}
if yyj1 < len(yyv1) {
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []AttachedVolume{}
yyc1 = true
}
}
@ -57683,7 +58063,7 @@ func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 592)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 616)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]

View File

@ -2386,11 +2386,22 @@ type NodeStatus struct {
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
// List of container images on this node
Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
// List of volumes in use (mounted) by the node.
VolumesInUse []UniqueDeviceName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
// List of attachable volumes in use (mounted) by the node.
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
// List of volumes that are attached to the node.
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
}
type UniqueDeviceName string
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node
type AttachedVolume struct {
// Name of the attached volume
Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
// DevicePath represents the device path where the volume should be available
DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// Describe a container image
type ContainerImage struct {

View File

@ -50,6 +50,16 @@ func (Affinity) SwaggerDoc() map[string]string {
return map_Affinity
}
var map_AttachedVolume = map[string]string{
"": "AttachedVolume describes a volume attached to a node",
"name": "Name of the attached volume",
"devicePath": "DevicePath represents the device path where the volume should be avilable",
}
func (AttachedVolume) SwaggerDoc() map[string]string {
return map_AttachedVolume
}
var map_AzureFileVolumeSource = map[string]string{
"": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"secretName": "the name of secret that contains Azure Storage Account Name and Key",
@ -880,7 +890,8 @@ var map_NodeStatus = map[string]string{
"daemonEndpoints": "Endpoints of daemons running on the Node.",
"nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-info",
"images": "List of container images on this node",
"volumesInUse": "List of volumes in use (mounted) by the node.",
"volumesInUse": "List of attachable volumes in use (mounted) by the node.",
"volumesAttached": "List of volumes that are attached to the node.",
}
func (NodeStatus) SwaggerDoc() map[string]string {

View File

@ -25,15 +25,16 @@ import (
// ValidateEvent makes sure that the event makes sense.
func ValidateEvent(event *api.Event) field.ErrorList {
allErrs := field.ErrorList{}
// There is no namespace required for node.
// There is no namespace required for node or persistent volume.
// However, older client code accidentally sets event.Namespace
// to api.NamespaceDefault, so we accept that too, but "" is preferred.
if event.InvolvedObject.Kind == "Node" &&
if (event.InvolvedObject.Kind == "Node" || event.InvolvedObject.Kind == "PersistentVolume") &&
event.Namespace != api.NamespaceDefault &&
event.Namespace != "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "not allowed for node"))
}
if event.InvolvedObject.Kind != "Node" &&
event.InvolvedObject.Kind != "PersistentVolume" &&
event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match involvedObject"))
}
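This relaxation means events about cluster-scoped PersistentVolumes may now omit the namespace, just as Node events always could. A minimal sketch under that assumption (the object names are illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/validation"
)

func main() {
	ev := &api.Event{
		// Empty namespace is now accepted for PersistentVolume events.
		ObjectMeta: api.ObjectMeta{Name: "pv0001.recycle", Namespace: ""},
		InvolvedObject: api.ObjectReference{
			Kind: "PersistentVolume",
			Name: "pv0001",
		},
	}
	fmt.Println(len(validation.ValidateEvent(ev)) == 0) // true
}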

View File

@ -224,7 +224,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName stri
// Special case for runtime.RawExtension and runtime.Objects because they always fail to validate
// This is because the actual values will be of some sub-type (e.g. Deployment) not the expected
// super-type (RawExtention)
// super-type (RawExtension)
if s.isGenericArray(details) {
errs := s.validateItems(value)
if len(errs) > 0 {

View File

@ -122,6 +122,8 @@ func ValidatePodSpecificAnnotations(annotations map[string]string, fldPath *fiel
}
}
allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...)
return allErrs
}
@ -1167,7 +1169,7 @@ func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expre
}
var validContainerResourceDivisorForCPU = sets.NewString("1m", "1")
var validContainerResourceDivisorForMemory = sets.NewString("1m", "1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -1182,7 +1184,7 @@ func validateContainerResourceDivisor(rName string, divisor resource.Quantity, f
}
case "limits.memory", "requests.memory":
if !validContainerResourceDivisorForMemory.Has(divisor.String()) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, fmt.Sprintf("only divisor's values 1m, 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")))
allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, fmt.Sprintf("only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")))
}
}
return allErrs
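The memory divisor list now starts at "1": sub-byte quantities such as "1m" are not meaningful for memory. A minimal sketch of a downward-API resource field selector that passes this validation (the container name and divisor choice are illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// Exposes a container's memory limit in mebibytes; "1Mi" remains a
	// valid memory divisor, while "1m" no longer is.
	sel := api.ResourceFieldSelector{
		ContainerName: "app",
		Resource:      "limits.memory",
		Divisor:       resource.MustParse("1Mi"),
	}
	fmt.Printf("%s divided by %s\n", sel.Resource, sel.Divisor.String())
}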
@ -1846,6 +1848,33 @@ func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath
return allErrs
}
func validateSeccompProfile(p string, fldPath *field.Path) field.ErrorList {
if p == "docker/default" {
return nil
}
if p == "unconfined" {
return nil
}
if strings.HasPrefix(p, "localhost/") {
return validateSubPath(strings.TrimPrefix(p, "localhost/"), fldPath)
}
return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")}
}
func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if p, exists := annotations[api.SeccompPodAnnotationKey]; exists {
allErrs = append(allErrs, validateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...)
}
for k, p := range annotations {
if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) {
allErrs = append(allErrs, validateSeccompProfile(p, fldPath.Child(k))...)
}
}
return allErrs
}
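The validator accepts exactly three profile forms: docker/default, unconfined, and localhost/<relative-path>. A minimal sketch exercising it (the localhost profile file name is illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("metadata", "annotations")
	for _, p := range []string{"docker/default", "unconfined", "localhost/my-profile.json", "bogus"} {
		errs := validation.ValidateSeccompPodAnnotations(
			map[string]string{api.SeccompPodAnnotationKey: p}, path)
		fmt.Printf("%-26s valid=%v\n", p, len(errs) == 0)
	}
}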
// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data.
func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

View File

@ -52,6 +52,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&api.ListOptions{},
)
}
func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -48,7 +48,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&LocalSubjectAccessReview{},
)
}
func (obj *LocalSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *SubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *SelfSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -52,7 +52,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&api.ListOptions{},
)
}
func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *HorizontalPodAutoscaler) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *HorizontalPodAutoscalerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -44,7 +44,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
)
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *HorizontalPodAutoscaler) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *HorizontalPodAutoscalerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -54,9 +54,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&api.ListOptions{},
)
}
func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *JobTemplate) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ScheduledJob) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ScheduledJobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -44,6 +44,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
)
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -47,9 +47,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
)
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *JobTemplate) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ScheduledJob) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ScheduledJobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -273,6 +273,9 @@ func DeepCopy_componentconfig_KubeletConfiguration(in KubeletConfiguration, out
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
out.ContainerRuntime = in.ContainerRuntime
if err := unversioned.DeepCopy_unversioned_Duration(in.RuntimeRequestTimeout, &out.RuntimeRequestTimeout, c); err != nil {
return err
}
out.RktPath = in.RktPath
out.RktAPIEndpoint = in.RktAPIEndpoint
out.RktStage1Image = in.RktStage1Image
@ -361,6 +364,7 @@ func DeepCopy_componentconfig_PortRangeVar(in PortRangeVar, out *PortRangeVar, c
func DeepCopy_componentconfig_VolumeConfiguration(in VolumeConfiguration, out *VolumeConfiguration, c *conversion.Cloner) error {
out.EnableHostPathProvisioning = in.EnableHostPathProvisioning
out.EnableDynamicProvisioning = in.EnableDynamicProvisioning
if err := DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, c); err != nil {
return err
}

View File

@ -48,6 +48,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&KubeSchedulerConfiguration{},
)
}
func (obj *KubeProxyConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *KubeSchedulerConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

File diff suppressed because it is too large Load Diff

View File

@ -263,6 +263,9 @@ type KubeletConfiguration struct {
CgroupRoot string `json:"cgroupRoot,omitempty"`
// containerRuntime is the container runtime to use.
ContainerRuntime string `json:"containerRuntime"`
// runtimeRequestTimeout is the timeout for all runtime requests except long running
// requests - pull, logs, exec and attach.
RuntimeRequestTimeout unversioned.Duration `json:"runtimeRequestTimeout,omitempty"`
// rktPath is the path of rkt binary. Leave empty to use the first rkt in
// $PATH.
RktPath string `json:"rktPath,omitempty"`
@ -577,6 +580,9 @@ type VolumeConfiguration struct {
// provisioning is not supported in any way, won't work in a multi-node cluster, and
// should not be used for anything other than testing or development.
EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"`
// enableDynamicProvisioning enables the provisioning of volumes when running within an environment
// that supports dynamic provisioning. Defaults to true.
EnableDynamicProvisioning bool `json:"enableDynamicProvisioning"`
// persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.
PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"`
// volumePluginDir is the full path of the directory in which the flex
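runtimeRequestTimeout bounds every runtime call except the long-running ones listed above. A minimal sketch of setting it programmatically, assuming the internal componentconfig package from the DeepCopy hunk earlier (the runtime name and timeout value are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
)

func main() {
	cfg := componentconfig.KubeletConfiguration{
		ContainerRuntime: "docker",
		// Applies to all runtime requests except pull, logs, exec, attach.
		RuntimeRequestTimeout: unversioned.Duration{Duration: 2 * time.Minute},
	}
	fmt.Println(cfg.RuntimeRequestTimeout.Duration)
}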

View File

@ -38,6 +38,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&KubeSchedulerConfiguration{},
)
}
func (obj *KubeProxyConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *KubeSchedulerConfiguration) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -77,23 +77,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&NetworkPolicyList{},
)
}
func (obj *Deployment) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentRollback) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicationControllerDummy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResource) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResourceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DaemonSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DaemonSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResourceData) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResourceDataList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Ingress) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *IngressList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicaSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicaSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodSecurityPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodSecurityPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NetworkPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NetworkPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -884,7 +884,7 @@ type NetworkPolicyPeer struct {
// Selects Namespaces using cluster scoped-labels. This
// matches all pods in all namespaces selected by this label selector.
// This field follows standard label selector semantics.
// If omited, this selector selects no namespaces.
// If omitted, this selector selects no namespaces.
// If present but empty, this selector selects all namespaces.
NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"`
}
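The nil-versus-empty distinction here is easy to trip over: a nil NamespaceSelector selects no namespaces, while a non-nil but empty one selects all of them. A minimal sketch using the internal extensions types, not part of the diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
	// Omitted selector: this peer matches no namespaces.
	none := extensions.NetworkPolicyPeer{}
	// Present but empty selector: this peer matches every namespace.
	all := extensions.NetworkPolicyPeer{
		NamespaceSelector: &unversioned.LabelSelector{},
	}
	fmt.Println(none.NamespaceSelector == nil, all.NamespaceSelector != nil)
}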

View File

@ -675,7 +675,7 @@ message NetworkPolicyPeer {
// Selects Namespaces using cluster scoped-labels. This
// matches all pods in all namespaces selected by this label selector.
// This field follows standard label selector semantics.
// If omited, this selector selects no namespaces.
// If omitted, this selector selects no namespaces.
// If present but empty, this selector selects all namespaces.
optional LabelSelector namespaceSelector = 2;
}

View File

@ -67,28 +67,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
// Add the watch version that applies
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *Deployment) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentRollback) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *HorizontalPodAutoscaler) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *HorizontalPodAutoscalerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicationControllerDummy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResource) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResourceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DaemonSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DaemonSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResourceData) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ThirdPartyResourceDataList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Ingress) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *IngressList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ListOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicaSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ReplicaSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodSecurityPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodSecurityPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NetworkPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *NetworkPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -1178,7 +1178,7 @@ type NetworkPolicyPeer struct {
// Selects Namespaces using cluster scoped-labels. This
// matches all pods in all namespaces selected by this label selector.
// This field follows standard label selector semantics.
// If omited, this selector selects no namespaces.
// If omitted, this selector selects no namespaces.
// If present but empty, this selector selects all namespaces.
NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
}

View File

@ -487,7 +487,7 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string {
var map_NetworkPolicyPeer = map[string]string{
"podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.",
"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omited, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
}
func (NetworkPolicyPeer) SwaggerDoc() map[string]string {

View File

@@ -50,6 +50,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
&PodDisruptionBudgetList{},
)
}
func (obj *PodDisruptionBudget) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodDisruptionBudgetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@@ -45,6 +45,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
// Add the watch version that applies
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *PodDisruptionBudget) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *PodDisruptionBudgetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@@ -62,13 +62,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
)
versioned.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *ClusterRole) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ClusterRoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ClusterRoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ClusterRoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Role) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@@ -46,7 +46,7 @@ message ClusterRoleBinding {
optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
// Subjects holds references to the objects the role applies to.
repeated Subject subject = 2;
repeated Subject subjects = 2;
// RoleRef can only reference a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
@@ -114,7 +114,7 @@ message RoleBinding {
optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
// Subjects holds references to the objects the role applies to.
repeated Subject subject = 2;
repeated Subject subjects = 2;
// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.

View File

@@ -50,13 +50,3 @@ func addKnownTypes(scheme *runtime.Scheme) {
)
versioned.AddToGroupVersion(scheme, SchemeGroupVersion)
}
func (obj *ClusterRole) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ClusterRoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ClusterRoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *ClusterRoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *Role) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *RoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@@ -1270,7 +1270,7 @@ func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("subject"))
r.EncodeString(codecSelferC_UTF81234, string("subjects"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Subjects == nil {
r.EncodeNil()
@@ -1412,7 +1412,7 @@ func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
yyv4 := &x.ObjectMeta
yyv4.CodecDecodeSelf(d)
}
case "subject":
case "subjects":
if r.TryDecodeAsNil() {
x.Subjects = nil
} else {
@@ -2633,7 +2633,7 @@ func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("subject"))
r.EncodeString(codecSelferC_UTF81234, string("subjects"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Subjects == nil {
r.EncodeNil()
@@ -2775,7 +2775,7 @@ func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
yyv4 := &x.ObjectMeta
yyv4.CodecDecodeSelf(d)
}
case "subject":
case "subjects":
if r.TryDecodeAsNil() {
x.Subjects = nil
} else {

View File

@@ -86,7 +86,7 @@ type RoleBinding struct {
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Subjects holds references to the objects the role applies to.
Subjects []Subject `json:"subject" protobuf:"bytes,2,rep,name=subjects"`
Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"`
// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
@@ -135,7 +135,7 @@ type ClusterRoleBinding struct {
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Subjects holds references to the objects the role applies to.
Subjects []Subject `json:"subject" protobuf:"bytes,2,rep,name=subjects"`
Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"`
// RoleRef can only reference a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
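
Because the rename above changes the JSON struct tag, it is visible on the wire: clients that hand-rolled v1alpha1 RBAC payloads with a "subject" key must now send "subjects". A quick sketch of the new serialization (field values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	b := v1alpha1.RoleBinding{
		Subjects: []v1alpha1.Subject{{Kind: "User", Name: "jane"}},
	}
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // the key is now "subjects"; before this change it was "subject"
}
```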

View File

@@ -40,7 +40,7 @@ func (ClusterRole) SwaggerDoc() map[string]string {
var map_ClusterRoleBinding = map[string]string{
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
"metadata": "Standard object's metadata.",
"subject": "Subjects holds references to the objects the role applies to.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
}
@@ -95,7 +95,7 @@ func (Role) SwaggerDoc() map[string]string {
var map_RoleBinding = map[string]string{
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
"metadata": "Standard object's metadata.",
"subject": "Subjects holds references to the objects the role applies to.",
"subjects": "Subjects holds references to the objects the role applies to.",
"roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
}

View File

@@ -17,11 +17,12 @@ limitations under the License.
package validation
import (
"errors"
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/auth/user"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -66,7 +67,7 @@ func ConfirmNoEscalation(ctx api.Context, ruleResolver AuthorizationRuleResolver
ownerRightsCover, missingRights := Covers(ownerRules, rules)
if !ownerRightsCover {
user, _ := api.UserFrom(ctx)
return errors.NewUnauthorized(fmt.Sprintf("attempt to grant extra privileges: %v user=%v ownerrules=%v ruleResolutionErrors=%v", missingRights, user, ownerRules, ruleResolutionErrors))
return apierrors.NewUnauthorized(fmt.Sprintf("attempt to grant extra privileges: %v user=%v ownerrules=%v ruleResolutionErrors=%v", missingRights, user, ownerRules, ruleResolutionErrors))
}
return nil
}
@@ -206,3 +207,79 @@ func appliesToUser(user user.Info, subject rbac.Subject) (bool, error) {
return false, fmt.Errorf("unknown subject kind: %s", subject.Kind)
}
}
// NewTestRuleResolver returns a rule resolver from lists of role objects.
func NewTestRuleResolver(roles []rbac.Role, roleBindings []rbac.RoleBinding, clusterRoles []rbac.ClusterRole, clusterRoleBindings []rbac.ClusterRoleBinding) AuthorizationRuleResolver {
r := staticRoles{
roles: roles,
roleBindings: roleBindings,
clusterRoles: clusterRoles,
clusterRoleBindings: clusterRoleBindings,
}
return newMockRuleResolver(&r)
}
func newMockRuleResolver(r *staticRoles) AuthorizationRuleResolver {
return NewDefaultRuleResolver(r, r, r, r)
}
type staticRoles struct {
roles []rbac.Role
roleBindings []rbac.RoleBinding
clusterRoles []rbac.ClusterRole
clusterRoleBindings []rbac.ClusterRoleBinding
}
func (r *staticRoles) GetRole(ctx api.Context, id string) (*rbac.Role, error) {
namespace, ok := api.NamespaceFrom(ctx)
if !ok || namespace == "" {
return nil, errors.New("must provide namespace when getting role")
}
for _, role := range r.roles {
if role.Namespace == namespace && role.Name == id {
return &role, nil
}
}
return nil, errors.New("role not found")
}
func (r *staticRoles) GetClusterRole(ctx api.Context, id string) (*rbac.ClusterRole, error) {
namespace, ok := api.NamespaceFrom(ctx)
if ok && namespace != "" {
return nil, errors.New("cannot provide namespace when getting cluster role")
}
for _, clusterRole := range r.clusterRoles {
if clusterRole.Namespace == namespace && clusterRole.Name == id {
return &clusterRole, nil
}
}
return nil, errors.New("role not found")
}
func (r *staticRoles) ListRoleBindings(ctx api.Context, options *api.ListOptions) (*rbac.RoleBindingList, error) {
namespace, ok := api.NamespaceFrom(ctx)
if !ok || namespace == "" {
return nil, errors.New("must provide namespace when listing role bindings")
}
roleBindingList := new(rbac.RoleBindingList)
for _, roleBinding := range r.roleBindings {
if roleBinding.Namespace != namespace {
continue
}
// TODO(ericchiang): need to implement label selectors?
roleBindingList.Items = append(roleBindingList.Items, roleBinding)
}
return roleBindingList, nil
}
func (r *staticRoles) ListClusterRoleBindings(ctx api.Context, options *api.ListOptions) (*rbac.ClusterRoleBindingList, error) {
namespace, ok := api.NamespaceFrom(ctx)
if ok && namespace != "" {
return nil, errors.New("cannot list cluster role bindings from within a namespace")
}
clusterRoleBindings := new(rbac.ClusterRoleBindingList)
clusterRoleBindings.Items = make([]rbac.ClusterRoleBinding, len(r.clusterRoleBindings))
copy(clusterRoleBindings.Items, r.clusterRoleBindings)
return clusterRoleBindings, nil
}
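
The static fixtures enforce the same context scoping as the real registry: namespaced lookups require a namespace in the context, cluster-scoped lookups forbid one. A sketch of how a test inside this package might exercise that (names hypothetical, error handling elided):

```go
roles := []rbac.Role{{ObjectMeta: api.ObjectMeta{Namespace: "ns1", Name: "pod-reader"}}}
resolver := NewTestRuleResolver(roles, nil, nil, nil)
_ = resolver // plug into whatever consumes an AuthorizationRuleResolver

s := staticRoles{roles: roles}
ctx := api.WithNamespace(api.NewContext(), "ns1")
role, _ := s.GetRole(ctx, "pod-reader")             // found: namespace matches
_, err := s.GetRole(api.NewContext(), "pod-reader") // error: must provide namespace
fmt.Println(role.Name, err)
```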

View File

@@ -255,9 +255,9 @@ type stripVersionEncoder struct {
serializer runtime.Serializer
}
func (c stripVersionEncoder) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unversioned.GroupVersion) error {
func (c stripVersionEncoder) Encode(obj runtime.Object, w io.Writer) error {
buf := bytes.NewBuffer([]byte{})
err := c.encoder.EncodeToStream(obj, buf, overrides...)
err := c.encoder.Encode(obj, buf)
if err != nil {
return err
}
@@ -268,7 +268,7 @@ func (c stripVersionEncoder) EncodeToStream(obj runtime.Object, w io.Writer, ove
gvk.Group = ""
gvk.Version = ""
roundTrippedObj.GetObjectKind().SetGroupVersionKind(*gvk)
return c.serializer.EncodeToStream(roundTrippedObj, w)
return c.serializer.Encode(roundTrippedObj, w)
}
// StripVersionNegotiatedSerializer will return stripVersionEncoder when
@@ -443,7 +443,7 @@ func writeNegotiated(s runtime.NegotiatedSerializer, gv unversioned.GroupVersion
w.WriteHeader(statusCode)
encoder := s.EncoderForVersion(serializer, gv)
if err := encoder.EncodeToStream(object, w); err != nil {
if err := encoder.Encode(object, w); err != nil {
errorJSONFatal(err, encoder, w)
}
}
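
This hunk is part of a broader runtime.Encoder interface change in this update: the per-call version overrides of EncodeToStream(obj, w, overrides...) are gone, and the target version is instead bound once when the encoder is constructed. Schematically:

```go
// Before: the group/version rode along on every call (sketch).
//   err := serializer.EncodeToStream(object, w, gv)
// After: bind the version up front; Encode then takes no version at all.
encoder := s.EncoderForVersion(serializer, gv) // s is the runtime.NegotiatedSerializer from above
if err := encoder.Encode(object, w); err != nil {
	errorJSONFatal(err, encoder, w)
}
```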

View File

@@ -143,16 +143,13 @@ func NewAuthorizerFromAuthorizationConfig(authorizationModes []string, config Au
}
authorizers = append(authorizers, webhookAuthorizer)
case ModeRBAC:
rbacAuthorizer, err := rbac.New(
rbacAuthorizer := rbac.New(
config.RBACRoleRegistry,
config.RBACRoleBindingRegistry,
config.RBACClusterRoleRegistry,
config.RBACClusterRoleBindingRegistry,
config.RBACSuperUser,
)
if err != nil {
return nil, err
}
authorizers = append(authorizers, rbacAuthorizer)
default:
return nil, fmt.Errorf("Unknown authorization mode %s specified", authorizationMode)

View File

@@ -185,7 +185,7 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
obj := event.Object
s.fixup(obj)
if err := s.embeddedEncoder.EncodeToStream(obj, buf); err != nil {
if err := s.embeddedEncoder.Encode(obj, buf); err != nil {
// unexpected error
utilruntime.HandleError(fmt.Errorf("unable to encode watch object: %v", err))
return
@@ -235,7 +235,7 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
}
obj := event.Object
s.fixup(obj)
if err := s.embeddedEncoder.EncodeToStream(obj, buf); err != nil {
if err := s.embeddedEncoder.Encode(obj, buf); err != nil {
// unexpected error
utilruntime.HandleError(fmt.Errorf("unable to encode watch object: %v", err))
return
@@ -248,7 +248,7 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
// the internal event will be versioned by the encoder
*internalEvent = versioned.InternalEvent(event)
if err := s.encoder.EncodeToStream(internalEvent, streamBuf); err != nil {
if err := s.encoder.Encode(internalEvent, streamBuf); err != nil {
// encoding error
utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err))
s.watching.Stop()

View File

@@ -189,12 +189,20 @@ func (f *DeltaFIFO) Delete(obj interface{}) error {
// Don't provide a second report of the same deletion.
return nil
}
} else if _, exists, err := f.knownObjects.GetByKey(id); err == nil && !exists {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
// TODO(lavalamp): This may be racy-- we aren't properly locked
// with knownObjects.
return nil
} else {
// We only want to skip the "deletion" action if the object doesn't
// exist in knownObjects and it doesn't have corresponding item in items.
// Note that even if there is a "deletion" action in items, we can ignore it,
// because it will be deduped automatically in "queueActionLocked"
_, exists, err := f.knownObjects.GetByKey(id)
_, itemsExist := f.items[id]
if err == nil && !exists && !itemsExist {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
// TODO(lavalamp): This may be racy-- we aren't properly locked
// with knownObjects.
return nil
}
}
return f.queueActionLocked(Deleted, obj)
@@ -270,6 +278,13 @@ func isDeletionDup(a, b *Delta) *Delta {
return b
}
// willObjectBeDeletedLocked returns true only if the last delta for the
// given object is Delete. Caller must lock first.
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
deltas := f.items[id]
return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}
// queueActionLocked appends to the delta list for the object, calling
// f.deltaCompressor if needed. Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
@@ -277,6 +292,14 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
if err != nil {
return KeyError{obj, err}
}
// If object is supposed to be deleted (last event is Deleted),
// then we should ignore Sync events, because it would result in
// recreation of this object.
if actionType == Sync && f.willObjectBeDeletedLocked(id) {
return nil
}
newDeltas := append(f.items[id], Delta{actionType, obj})
newDeltas = dedupDeltas(newDeltas)
if f.deltaCompressor != nil {
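
The guard is worth spelling out: without it, a relist could append a Sync delta after a not-yet-processed Deleted delta, and a consumer replaying the queue would recreate an object that is actually gone. A self-contained sketch of the tail check (simplified types, not the real cache package):

```go
package main

import "fmt"

type delta struct{ typ string }

// willBeDeleted mirrors willObjectBeDeletedLocked: only the last pending delta matters.
func willBeDeleted(deltas []delta) bool {
	return len(deltas) > 0 && deltas[len(deltas)-1].typ == "Deleted"
}

func main() {
	items := map[string][]delta{"default/pod-a": {{"Added"}, {"Deleted"}}}
	if willBeDeleted(items["default/pod-a"]) {
		fmt.Println("dropping Sync for default/pod-a") // prevents resurrecting the object on resync
	}
}
```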

View File

@@ -45,6 +45,8 @@ type Queue interface {
}
// Helper function for popping from Queue.
// WARNING: Do NOT use this function in non-test code to avoid races
// unless you really really really really know what you are doing.
func Pop(queue Queue) interface{} {
var result interface{}
queue.Pop(func(obj interface{}) error {

View File

@@ -22,8 +22,6 @@ type EndpointsExpansion interface{}
type LimitRangeExpansion interface{}
type NodeExpansion interface{}
type PersistentVolumeExpansion interface{}
type PersistentVolumeClaimExpansion interface{}

View File

@@ -0,0 +1,40 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unversioned
import "k8s.io/kubernetes/pkg/api"
// The NodeExpansion interface allows manually adding extra methods to the NodeInterface.
type NodeExpansion interface {
// PatchStatus modifies the status of an existing node. It returns the copy
// of the node that the server returns, or an error.
PatchStatus(nodeName string, data []byte) (*api.Node, error)
}
// PatchStatus modifies the status of an existing node. It returns the copy of
// the node that the server returns, or an error.
func (c *nodes) PatchStatus(nodeName string, data []byte) (*api.Node, error) {
result := &api.Node{}
err := c.client.Patch(api.StrategicMergePatchType).
Resource("nodes").
Name(nodeName).
SubResource("status").
Body(data).
Do().
Into(result)
return result, err
}
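
A hypothetical caller, given a client `c` from this package (the condition payload is illustrative only):

```go
patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"True"}]}}`)
node, err := c.Nodes().PatchStatus("node-1", patch)
if err != nil {
	return err
}
fmt.Printf("patched node %s\n", node.Name)
```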

View File

@@ -250,6 +250,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
e, err := le.config.Client.Endpoints(le.config.EndpointsMeta.Namespace).Get(le.config.EndpointsMeta.Name)
if err != nil {
if !errors.IsNotFound(err) {
glog.Errorf("error retrieving endpoint: %v", err)
return false
}

View File

@@ -253,8 +253,8 @@ func (dynamicCodec) Decode(data []byte, gvk *unversioned.GroupVersionKind, obj r
return obj, gvk, nil
}
func (dynamicCodec) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unversioned.GroupVersion) error {
return runtime.UnstructuredJSONScheme.EncodeToStream(obj, w, overrides...)
func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error {
return runtime.UnstructuredJSONScheme.Encode(obj, w)
}
// paramaterCodec is a codec converts an API object to query

View File

@@ -22,7 +22,6 @@ import (
"io"
"net"
"net/url"
"os"
"regexp"
"strconv"
"strings"
@@ -1296,12 +1295,9 @@ func (c *AWSCloud) AttachDisk(diskName string, instanceName string, readOnly boo
// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
hostDevice := "/dev/xvd" + string(mountDevice)
// In the EC2 API, it is sometimes is /dev/sdX and sometimes /dev/xvdX
// We are running on the node here, so we check if /dev/xvda exists to determine this
// We are using xvd names (so we are HVM only)
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
ec2Device := "/dev/xvd" + string(mountDevice)
if _, err := os.Stat("/dev/xvda"); os.IsNotExist(err) {
ec2Device = "/dev/sd" + string(mountDevice)
}
// attachEnded is set to true if the attach operation completed
// (successfully or not)
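
Dropping the os.Stat probe follows from the assumption stated in the new comment: on HVM instances the guest kernel exposes EBS volumes as /dev/xvdX even when the EC2 API names them /dev/sdX, so there is no longer any need to inspect the local filesystem to decide which naming convention the node uses.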
@@ -2317,6 +2313,19 @@ func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string) (
permissions.Insert(permission)
}
// Allow ICMP fragmentation packets, important for MTU discovery
{
permission := &ec2.IpPermission{
IpProtocol: aws.String("icmp"),
FromPort: aws.Int64(3),
ToPort: aws.Int64(4),
IpRanges: []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
}
permissions.Insert(permission)
}
_, err = s.setSecurityGroupIngress(securityGroupID, permissions)
if err != nil {
return nil, err

View File

@@ -218,7 +218,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB
// NOTE The documentation for the AWS API indicates we could get an HTTP 400
// back if a policy of the same name already exists. However, the aws-sdk does not
// seem to return an error to us in these cases. Therefore this will issue an API
// request everytime.
// request every time.
err := s.createProxyProtocolPolicy(loadBalancerName)
if err != nil {
return nil, err

View File

@@ -85,18 +85,20 @@ type GCECloud struct {
managedZones []string // List of zones we are spanning (for Ubernetes-Lite, primarily when running on master)
networkURL string
nodeTags []string // List of tags to use on firewall rules for load balancers
nodeInstancePrefix string // If non-"", an advisory prefix for all nodes in the cluster
useMetadataServer bool
operationPollRateLimiter flowcontrol.RateLimiter
}
type Config struct {
Global struct {
TokenURL string `gcfg:"token-url"`
TokenBody string `gcfg:"token-body"`
ProjectID string `gcfg:"project-id"`
NetworkName string `gcfg:"network-name"`
NodeTags []string `gcfg:"node-tags"`
Multizone bool `gcfg:"multizone"`
TokenURL string `gcfg:"token-url"`
TokenBody string `gcfg:"token-body"`
ProjectID string `gcfg:"project-id"`
NetworkName string `gcfg:"network-name"`
NodeTags []string `gcfg:"node-tags"`
NodeInstancePrefix string `gcfg:"node-instance-prefix"`
Multizone bool `gcfg:"multizone"`
}
}
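
Per the gcfg tags above, the new knob is read from the [global] section of the GCE cloud config file. A hypothetical example (all values made up):

```ini
[global]
project-id = my-project
network-name = my-network
node-tags = my-cluster-node
node-instance-prefix = my-cluster-minion
multizone = true
```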
@@ -120,7 +122,7 @@ type Disks interface {
// DeleteDisk deletes PD.
DeleteDisk(diskToDelete string) error
// GetAutoLabelsForPD returns labels to apply to PeristentVolume
// GetAutoLabelsForPD returns labels to apply to PersistentVolume
// representing this PD, namely failure domain and zone.
GetAutoLabelsForPD(name string) (map[string]string, error)
}
@@ -260,6 +262,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) {
tokenSource := google.ComputeTokenSource("")
var nodeTags []string
var nodeInstancePrefix string
if config != nil {
var cfg Config
if err := gcfg.ReadInto(&cfg, config); err != nil {
@@ -281,19 +284,20 @@ func newGCECloud(config io.Reader) (*GCECloud, error) {
tokenSource = NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
}
nodeTags = cfg.Global.NodeTags
nodeInstancePrefix = cfg.Global.NodeInstancePrefix
if cfg.Global.Multizone {
managedZones = nil // Use all zones in region
}
}
return CreateGCECloud(projectID, region, zone, managedZones, networkURL, nodeTags, tokenSource, true /* useMetadataServer */)
return CreateGCECloud(projectID, region, zone, managedZones, networkURL, nodeTags, nodeInstancePrefix, tokenSource, true /* useMetadataServer */)
}
// Creates a GCECloud object using the specified parameters.
// If no networkUrl is specified, loads networkName via rest call.
// If no tokenSource is specified, uses oauth2.DefaultTokenSource.
// If managedZones is nil / empty all zones in the region will be managed.
func CreateGCECloud(projectID, region, zone string, managedZones []string, networkURL string, nodeTags []string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*GCECloud, error) {
func CreateGCECloud(projectID, region, zone string, managedZones []string, networkURL string, nodeTags []string, nodeInstancePrefix string, tokenSource oauth2.TokenSource, useMetadataServer bool) (*GCECloud, error) {
if tokenSource == nil {
var err error
tokenSource, err = google.DefaultTokenSource(
@@ -348,6 +352,7 @@ func CreateGCECloud(projectID, region, zone string, managedZones []string, netwo
managedZones: managedZones,
networkURL: networkURL,
nodeTags: nodeTags,
nodeInstancePrefix: nodeInstancePrefix,
useMetadataServer: useMetadataServer,
operationPollRateLimiter: operationPollRateLimiter,
}, nil
@@ -978,10 +983,16 @@ func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges nets
for ix := range ports {
allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port))
}
hostTags, err := gce.computeHostTags(hosts)
if err != nil {
return nil, err
// If the node tags to be used for this cluster have been predefined in the
// provider config, just use them. Otherwise, invoke computeHostTags method to get the tags.
hostTags := gce.nodeTags
if len(hostTags) == 0 {
var err error
if hostTags, err = gce.computeHostTags(hosts); err != nil {
return nil, fmt.Errorf("No node tags supplied and also failed to parse the given lists of hosts for tags. Abort creating firewall rule.")
}
}
firewall := &compute.Firewall{
Name: makeFirewallName(name),
Description: desc,
@@ -1003,21 +1014,28 @@ return firewall, nil
return firewall, nil
}
// If the node tags to be used for this cluster have been predefined in the
// provider config, just use them. Otherwise, grab all tags from all relevant
// instances:
// ComputeHostTags grabs all tags from all instances being added to the pool.
// * The longest tag that is a prefix of the instance name is used
// * If any instance has a prefix tag, all instances must
// * If no instances have a prefix tag, no tags are used
// * If any instance has no matching prefix tag, return error
// Invoking this method to get host tags is risky since it depends on the format
// of the host names in the cluster. Only use it as a fallback if gce.nodeTags
// is unspecified
func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
if len(gce.nodeTags) > 0 {
return gce.nodeTags, nil
}
// TODO: We could store the tags in gceInstance, so we could have already fetched it
hostNamesByZone := make(map[string][]string)
hostNamesByZone := make(map[string]map[string]bool) // map of zones -> map of names -> bool (for easy lookup)
nodeInstancePrefix := gce.nodeInstancePrefix
for _, host := range hosts {
hostNamesByZone[host.Zone] = append(hostNamesByZone[host.Zone], host.Name)
if !strings.HasPrefix(host.Name, gce.nodeInstancePrefix) {
glog.Warningf("instance '%s' does not conform to prefix '%s', ignoring filter", host, gce.nodeInstancePrefix)
nodeInstancePrefix = ""
}
z, ok := hostNamesByZone[host.Zone]
if !ok {
z = make(map[string]bool)
hostNamesByZone[host.Zone] = z
}
z[host.Name] = true
}
tags := sets.NewString()
@@ -1028,11 +1046,14 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
listCall := gce.service.Instances.List(gce.projectID, zone)
// Add the filter for hosts
listCall = listCall.Filter("name eq (" + strings.Join(hostNames, "|") + ")")
if nodeInstancePrefix != "" {
// Add the filter for hosts
listCall = listCall.Filter("name eq " + nodeInstancePrefix + ".*")
}
// Add the fields we want
listCall = listCall.Fields("items(name,tags)")
// TODO(zmerlynn): Internal bug 29524655
// listCall = listCall.Fields("items(name,tags)")
if pageToken != "" {
listCall = listCall.PageToken(pageToken)
@@ -1044,6 +1065,10 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
}
pageToken = res.NextPageToken
for _, instance := range res.Items {
if !hostNames[instance.Name] {
continue
}
longest_tag := ""
for _, tag := range instance.Tags.Items {
if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longest_tag) {
@@ -1052,8 +1077,8 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
}
if len(longest_tag) > 0 {
tags.Insert(longest_tag)
} else if len(tags) > 0 {
return nil, fmt.Errorf("Some, but not all, instances have prefix tags (%s is missing)", instance.Name)
} else {
return nil, fmt.Errorf("Could not find any tag that is a prefix of instance name for instance %s", instance.Name)
}
}
}
@@ -1061,11 +1086,9 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
glog.Errorf("computeHostTags exceeded maxPages=%d for Instances.List: truncating.", maxPages)
}
}
if len(tags) == 0 {
glog.V(2).Info("No instances had tags, creating rule without target tags")
return nil, fmt.Errorf("No instances found")
}
return tags.List(), nil
}
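
Concretely: an instance named my-cluster-minion-a1b2 tagged with both my-cluster and my-cluster-minion contributes the longer my-cluster-minion, while an instance with no tag prefixing its name now fails the whole call instead of silently producing a firewall rule with incomplete target tags. The in-memory name set is needed because the "name eq <prefix>.*" filter is a regex over instance names and can match instances outside the requested host list.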
@@ -2429,21 +2452,20 @@ type gceDisk struct {
// Gets the named instances, returning cloudprovider.InstanceNotFound if any instance is not found
func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
instances := make(map[string]*gceInstance)
remaining := len(names)
nodeInstancePrefix := gce.nodeInstancePrefix
for _, name := range names {
name = canonicalizeInstanceName(name)
if !strings.HasPrefix(name, gce.nodeInstancePrefix) {
glog.Warningf("instance '%s' does not conform to prefix '%s', removing filter", name, gce.nodeInstancePrefix)
nodeInstancePrefix = ""
}
instances[name] = nil
}
for _, zone := range gce.managedZones {
var remaining []string
for name, instance := range instances {
if instance == nil {
remaining = append(remaining, name)
}
}
if len(remaining) == 0 {
if remaining == 0 {
break
}
@@ -2452,10 +2474,13 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error)
for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
listCall := gce.service.Instances.List(gce.projectID, zone)
// Add the filter for hosts
listCall = listCall.Filter("name eq (" + strings.Join(remaining, "|") + ")")
if nodeInstancePrefix != "" {
// Add the filter for hosts
listCall = listCall.Filter("name eq " + nodeInstancePrefix + ".*")
}
listCall = listCall.Fields("items(name,id,disks,machineType)")
// TODO(zmerlynn): Internal bug 29524655
// listCall = listCall.Fields("items(name,id,disks,machineType)")
if pageToken != "" {
listCall.PageToken(pageToken)
}
@@ -2467,6 +2492,10 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error)
pageToken = res.NextPageToken
for _, i := range res.Items {
name := i.Name
if _, ok := instances[name]; !ok {
continue
}
instance := &gceInstance{
Zone: zone,
Name: name,
@@ -2475,6 +2504,7 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error)
Type: lastComponent(i.MachineType),
}
instances[name] = instance
remaining--
}
}
if page >= maxPages {

View File

@@ -44,7 +44,8 @@ const ActivePowerState = "poweredOn"
const DefaultDiskController = "scsi"
const DefaultSCSIControllerType = "lsilogic"
var ErrNoDiskUUIDFound = errors.New("no disk UUID found")
var ErrNoDiskUUIDFound = errors.New("No disk UUID found")
var ErrNoDiskIDFound = errors.New("No vSphere disk ID found")
var ErrNoDevicesFound = errors.New("No devices found")
// VSphere is an implementation of cloud provider Interface for VSphere.
@@ -273,7 +274,7 @@ func (i *Instances) List(filter string) ([]string, error) {
return nil, err
}
glog.V(3).Infof("Found %s instances matching %s: %s",
glog.V(3).Infof("found %s instances matching %s: %s",
len(vmList), filter, vmList)
return vmList, nil
@@ -435,9 +436,9 @@ func (vs *VSphere) GetZone() (cloudprovider.Zone, error) {
return cloudprovider.Zone{Region: vs.cfg.Global.Datacenter}, nil
}
// Routes returns an implementation of Routes for vSphere.
// Routes returns a false since the interface is not supported for vSphere.
func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
return nil, true
return nil, false
}
// ScrubDNS filters DNS settings for pods.
@@ -525,7 +526,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
// create a scsi controller if there is not one
newSCSIController, err := vmDevices.CreateSCSIController(vs.cfg.Disk.SCSIControllerType)
if err != nil {
glog.V(3).Infof("Cannot create new SCSI controller - %v", err)
glog.V(3).Infof("cannot create new SCSI controller - %v", err)
return "", "", err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
@@ -536,7 +537,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
// add the scsi controller to virtual machine
err = vm.AddDevice(context.TODO(), newSCSIController)
if err != nil {
glog.V(3).Infof("Cannot add SCSI controller to vm - %v", err)
glog.V(3).Infof("cannot add SCSI controller to vm - %v", err)
// attempt clean up of scsi controller
if vmDevices, err := vm.Device(ctx); err == nil {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
@ -551,7 +552,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
return "", "", err
}
if diskController, err = vmDevices.FindDiskController(vs.cfg.Disk.DiskController); err != nil {
glog.V(3).Infof("Cannot find disk controller - %v", err)
glog.V(3).Infof("cannot find disk controller - %v", err)
// attempt clean up of scsi controller
cleanUpController(newSCSIController, vmDevices, vm, ctx)
return "", "", err
@@ -562,12 +563,11 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
disk := vmDevices.CreateDisk(diskController, ds.Reference(), vmDiskPath)
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
disk = vmDevices.ChildDisk(disk)
// Attach disk to the VM
err = vm.AddDevice(ctx, disk)
if err != nil {
glog.V(3).Infof("Cannot add disk to the vm - %v", err)
glog.V(3).Infof("cannot attach disk to the vm - %v", err)
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
@@ -611,13 +611,29 @@ func getVirtualDiskUUID(newDevice types.BaseVirtualDevice) (string, error) {
if b, ok := vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
uuidWithNoHypens := strings.Replace(b.Uuid, "-", "", -1)
return uuidWithNoHypens, nil
return strings.ToLower(uuidWithNoHypens), nil
}
return "", ErrNoDiskUUIDFound
}
func getVirtualDiskID(volPath string, vmDevices object.VirtualDeviceList) (string, error) {
// filter vm devices to retrieve disk ID for the given vmdk file
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
d := device.GetVirtualDevice()
if b, ok := d.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok {
fileName := b.GetVirtualDeviceFileBackingInfo().FileName
if fileName == volPath {
return vmDevices.Name(device), nil
}
}
}
}
return "", ErrNoDiskIDFound
}
// Detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(diskID string, nodeName string) error {
func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -642,13 +658,19 @@ func (vs *VSphere) DetachDisk(diskID string, nodeName string) error {
return err
}
diskID, err := getVirtualDiskID(volPath, vmDevices)
if err != nil {
glog.Warningf("disk ID not found for %v ", volPath)
return err
}
// Remove disk from VM
device := vmDevices.Find(diskID)
if device == nil {
return fmt.Errorf("device '%s' not found", diskID)
}
err = vm.RemoveDevice(ctx, false, device)
err = vm.RemoveDevice(ctx, true, device)
if err != nil {
return err
}
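
Note the flipped boolean on RemoveDevice: assuming the vendored govmomi signature RemoveDevice(ctx, keepFiles bool, devices ...), passing true detaches the device while leaving the backing VMDK on the datastore, where false issued a destroy file operation. For persistent volumes, which may be re-attached to another node later, destroying the disk on detach would have meant data loss.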

View File

@@ -319,7 +319,7 @@ func (e *EndpointController) syncService(key string) {
if !e.podStoreSynced() {
// Sleep so we give the pod reflector goroutine a chance to run.
time.Sleep(PodStoreSyncedPollPeriod)
glog.Infof("Waiting for pods controller to sync, requeuing rc %v", key)
glog.Infof("Waiting for pods controller to sync, requeuing service %v", key)
e.queue.Add(key)
return
}

View File

@@ -256,7 +256,7 @@ func (gc *GarbageCollector) orhpanDependents(owner objectReference, dependents [
if len(failedDependents) != 0 {
return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
}
fmt.Println("CHAO: successfully updated all dependents")
glog.V(6).Infof("successfully updated all dependents")
return nil
}

View File

@@ -61,7 +61,7 @@ const (
podCIDRUpdateRetry = 5
// controls how often NodeController will try to evict Pods from non-responsive Nodes.
nodeEvictionPeriod = 100 * time.Millisecond
// controlls how many NodeSpec updates NC can process in any moment.
// controls how many NodeSpec updates NC can process in any moment.
cidrUpdateWorkers = 10
cidrUpdateQueueSize = 5000
)

View File

@@ -106,18 +106,19 @@ const createProvisionedPVInterval = 10 * time.Second
// framework.Controllers that watch PersistentVolume and PersistentVolumeClaim
// changes.
type PersistentVolumeController struct {
volumeController *framework.Controller
volumeControllerStopCh chan struct{}
volumeSource cache.ListerWatcher
claimController *framework.Controller
claimControllerStopCh chan struct{}
claimSource cache.ListerWatcher
kubeClient clientset.Interface
eventRecorder record.EventRecorder
cloud cloudprovider.Interface
recyclePluginMgr vol.VolumePluginMgr
provisioner vol.ProvisionableVolumePlugin
clusterName string
volumeController *framework.Controller
volumeControllerStopCh chan struct{}
volumeSource cache.ListerWatcher
claimController *framework.Controller
claimControllerStopCh chan struct{}
claimSource cache.ListerWatcher
kubeClient clientset.Interface
eventRecorder record.EventRecorder
cloud cloudprovider.Interface
recyclePluginMgr vol.VolumePluginMgr
provisioner vol.ProvisionableVolumePlugin
enableDynamicProvisioning bool
clusterName string
// Cache of the last known version of volumes and claims. This cache is
// thread safe as long as the volumes/claims there are not modified, they
@@ -337,7 +338,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume)
if volume.Spec.ClaimRef == nil {
// Volume is unused
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name)
if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable); err != nil {
if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable, ""); err != nil {
// Nothing was saved; we will fall back into the same
// condition in the next call to this method
return err
@@ -349,7 +350,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume)
// The PV is reserved for a PVC; that PVC has not yet been
// bound to this PV; the PVC sync will handle it.
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable); err != nil {
if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable, ""); err != nil {
// Nothing was saved; we will fall back into the same
// condition in the next call to this method
return err
@@ -394,7 +395,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume)
if volume.Status.Phase != api.VolumeReleased && volume.Status.Phase != api.VolumeFailed {
// Also, log this only once:
glog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy)
if volume, err = ctrl.updateVolumePhase(volume, api.VolumeReleased); err != nil {
if volume, err = ctrl.updateVolumePhase(volume, api.VolumeReleased, ""); err != nil {
// Nothing was saved; we will fall back into the same condition
// in the next call to this method
return err
@@ -435,7 +436,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume)
} else if claim.Spec.VolumeName == volume.Name {
// Volume is bound to a claim properly, update status if necessary
glog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name)
if _, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil {
if _, err = ctrl.updateVolumePhase(volume, api.VolumeBound, ""); err != nil {
// Nothing was saved; we will fall back into the same
// condition in the next call to this method
return err
@@ -530,7 +531,7 @@ func (ctrl *PersistentVolumeController) updateClaimPhaseWithEvent(claim *api.Per
return nil, err
}
// Emit the event only when the status change happens, not everytime
// Emit the event only when the status change happens, not every time
// syncClaim is called.
glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message)
ctrl.eventRecorder.Event(newClaim, eventtype, reason, message)
@@ -539,7 +540,7 @@ func (ctrl *PersistentVolumeController) updateClaimPhaseWithEvent(claim *api.Per
}
// updateVolumePhase saves new volume phase to API server.
func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.PersistentVolume, phase api.PersistentVolumePhase) (*api.PersistentVolume, error) {
func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.PersistentVolume, phase api.PersistentVolumePhase, message string) (*api.PersistentVolume, error) {
glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase)
if volume.Status.Phase == phase {
// Nothing to do.
@@ -557,6 +558,8 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.Persistent
}
volumeClone.Status.Phase = phase
volumeClone.Status.Message = message
newVol, err := ctrl.kubeClient.Core().PersistentVolumes().UpdateStatus(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
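
With the extra parameter, failure phases can carry a human-readable reason into volume.Status.Message, while the happy-path call sites above simply pass "". A hypothetical failure-path call (deleteErr is illustrative):

```go
if _, err := ctrl.updateVolumePhase(volume, api.VolumeFailed, "deletion failed: "+deleteErr.Error()); err != nil {
	return err // nothing was saved; the next sync retries from the same state
}
```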
@@ -582,12 +585,12 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *api.P
return volume, nil
}
newVol, err := ctrl.updateVolumePhase(volume, phase)
newVol, err := ctrl.updateVolumePhase(volume, phase, message)
if err != nil {
return nil, err
}
// Emit the event only when the status change happens, not everytime
// Emit the event only when the status change happens, not every time
// syncClaim is called.
glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message)
ctrl.eventRecorder.Event(newVol, eventtype, reason, message)
@@ -741,7 +744,7 @@ func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim
}
volume = updatedVolume
if updatedVolume, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil {
if updatedVolume, err = ctrl.updateVolumePhase(volume, api.VolumeBound, ""); err != nil {
glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err)
return err
}
@@ -774,7 +777,7 @@ func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim
func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolume) error {
glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
// Save the PV only when any modification is neccesary.
// Save the PV only when any modification is neccessary.
clone, err := conversion.NewCloner().DeepCopy(volume)
if err != nil {
return fmt.Errorf("Error cloning pv: %v", err)
@@ -811,7 +814,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolum
glog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name)
// Update the status
_, err = ctrl.updateVolumePhase(newVol, api.VolumeAvailable)
_, err = ctrl.updateVolumePhase(newVol, api.VolumeAvailable, "")
return err
}
@@ -1060,8 +1063,11 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVol
return nil
}
// provisionClaim starts new asynchronous operation to provision a claim.
// provisionClaim starts new asynchronous operation to provision a claim if provisioning is enabled.
func (ctrl *PersistentVolumeController) provisionClaim(claim *api.PersistentVolumeClaim) error {
if !ctrl.enableDynamicProvisioning {
return nil
}
glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim))
opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID))
ctrl.scheduleOperation(opName, func() error {
@@ -1154,7 +1160,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
// Add annBoundByController (used in deleting the volume)
setAnnotation(&volume.ObjectMeta, annBoundByController, "yes")
setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.Name())
setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName())
// Try to create the PV object several times
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {

View File

@@ -52,6 +52,7 @@ func NewPersistentVolumeController(
clusterName string,
volumeSource, claimSource cache.ListerWatcher,
eventRecorder record.EventRecorder,
enableDynamicProvisioning bool,
) *PersistentVolumeController {
if eventRecorder == nil {
@@ -65,9 +66,10 @@ func NewPersistentVolumeController(
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: kubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(),
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
cloud: cloud,
provisioner: provisioner,
enableDynamicProvisioning: enableDynamicProvisioning,
clusterName: clusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
createProvisionedPVInterval: createProvisionedPVInterval,
@@ -128,7 +130,7 @@ func NewPersistentVolumeController(
return controller
}
// initalizeCaches fills all controller caches with initial data from etcd in
// initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when first addClaim/addVolume to
// perform initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {

View File

@@ -18,6 +18,7 @@ package persistentvolume
import (
"fmt"
"net"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -71,3 +72,11 @@ func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
func (ctrl *PersistentVolumeController) GetHostName() string {
return ""
}
func (ctrl *PersistentVolumeController) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("PersistentVolumeController.GetHostIP() is not implemented")
}
func (ctrl *PersistentVolumeController) GetRootContext() string {
return ""
}

View File

@@ -326,12 +326,8 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
}
curPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
glog.V(4).Infof("Pod %s updated %+v -> %+v.", curPod.Name, oldPod, curPod)
rs := rsc.getPodReplicaSet(curPod)
if rs == nil {
return
}
glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
if curPod.DeletionTimestamp != nil {
// when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period,
// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
@@ -339,11 +335,17 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
// until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
// an rs never initiates a phase change, and so is never asleep waiting for the same.
rsc.deletePod(curPod)
if labelChanged {
// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
rsc.deletePod(oldPod)
}
return
}
rsc.enqueueReplicaSet(rs)
if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
if rs := rsc.getPodReplicaSet(curPod); rs != nil {
rsc.enqueueReplicaSet(rs)
}
if labelChanged {
// If the old and new ReplicaSet are the same, the first one that syncs
// will set expectations preventing any damage from the second.
if oldRS := rsc.getPodReplicaSet(oldPod); oldRS != nil {
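
The restructuring matters for correctness, not just logging: when a pod's labels change it can leave one ReplicaSet's selector and enter another's, so both the old owner (resolved from oldPod) and the new owner (resolved from curPod) must be enqueued, or the set the pod left never notices it is short a replica. The same reasoning drives the new deletePod(oldPod) call on the graceful-deletion path: once the labels differ, curPod no longer resolves to the ReplicaSet whose bookkeeping has to be corrected.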

View File

@@ -353,12 +353,9 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
return
}
curPod := cur.(*api.Pod)
rc := rm.getPodController(curPod)
if rc == nil {
return
}
oldPod := old.(*api.Pod)
glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
if curPod.DeletionTimestamp != nil {
// when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period,
// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
@@ -366,11 +363,18 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
// until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
// an rc never initiates a phase change, and so is never asleep waiting for the same.
rm.deletePod(curPod)
if labelChanged {
// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
rm.deletePod(oldPod)
}
return
}
rm.enqueueController(rc)
if rc := rm.getPodController(curPod); rc != nil {
rm.enqueueController(rc)
}
// Only need to get the old controller if the labels changed.
if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
if labelChanged {
// If the old and new rc are the same, the first one that syncs
// will set expectations preventing any damage from the second.
if oldRC := rm.getPodController(oldPod); oldRC != nil {

View File

@@ -20,6 +20,7 @@ package volume
import (
"fmt"
"net"
"time"
"github.com/golang/glog"
@@ -27,19 +28,20 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/reconciler"
"k8s.io/kubernetes/pkg/controller/volume/statusupdater"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/attachdetach"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
// loopPeriod is the ammount of time the reconciler loop waits between
// loopPeriod is the amount of time the reconciler loop waits between
// successive executions
reconcilerLoopPeriod time.Duration = 100 * time.Millisecond
@@ -103,13 +105,19 @@ func NewAttachDetachController(
adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
adc.attacherDetacher = attacherdetacher.NewAttacherDetacher(&adc.volumePluginMgr)
adc.attacherDetacher =
operationexecutor.NewOperationExecutor(
kubeClient,
&adc.volumePluginMgr)
adc.nodeStatusUpdater = statusupdater.NewNodeStatusUpdater(
kubeClient, nodeInformer, adc.actualStateOfWorld)
adc.reconciler = reconciler.NewReconciler(
reconcilerLoopPeriod,
reconcilerMaxWaitForUnmountDuration,
adc.desiredStateOfWorld,
adc.actualStateOfWorld,
adc.attacherDetacher)
adc.attacherDetacher,
adc.nodeStatusUpdater)
return adc, nil
}
@@ -152,12 +160,16 @@ type attachDetachController struct {
actualStateOfWorld cache.ActualStateOfWorld
// attacherDetacher is used to start asynchronous attach and operations
attacherDetacher attacherdetacher.AttacherDetacher
attacherDetacher operationexecutor.OperationExecutor
// reconciler is used to run an asynchronous periodic loop to reconcile the
// desiredStateOfWorld with the actualStateOfWorld by triggering attach
// detach operations using the attacherDetacher.
reconciler reconciler.Reconciler
// nodeStatusUpdater is used to update node status with the list of attached
// volumes
nodeStatusUpdater statusupdater.NodeStatusUpdater
}
func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
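
The new collaborator closes the loop with the kubelet: the controller publishes the volumes it believes are attached into each node's status through the NodeStatusUpdater, and processVolumesInUse below compares that against the VolumesInUse list the kubelet reports back, so a detach is not triggered while the node still claims to be using the volume.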
@@ -205,7 +217,7 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) {
}
nodeName := node.Name
if _, exists := node.Annotations[attachdetach.ControllerManagedAnnotation]; exists {
if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists {
// Node specifies annotation indicating it should be managed by attach
// detach controller. Add it to desired state of world.
adc.desiredStateOfWorld.AddNode(nodeName)
@@ -284,10 +296,11 @@ func (adc *attachDetachController) processPodVolumes(
continue
}
uniquePodName := volumehelper.GetUniquePodName(pod)
if addVolumes {
// Add volume to desired state of world
_, err := adc.desiredStateOfWorld.AddPod(
getUniquePodName(pod), volumeSpec, pod.Spec.NodeName)
uniquePodName, volumeSpec, pod.Spec.NodeName)
if err != nil {
glog.V(10).Infof(
"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
@@ -299,11 +312,11 @@ func (adc *attachDetachController) processPodVolumes(
} else {
// Remove volume from desired state of world
uniqueVolumeName, err := attachdetach.GetUniqueDeviceNameFromSpec(
uniqueVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
glog.V(10).Infof(
"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GenerateUniqueDeviceName failed with %v",
"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v",
podVolume.Name,
pod.Namespace,
pod.Name,
@@ -311,7 +324,7 @@ func (adc *attachDetachController) processPodVolumes(
continue
}
adc.desiredStateOfWorld.DeletePod(
getUniquePodName(pod), uniqueVolumeName, pod.Spec.NodeName)
uniquePodName, uniqueVolumeName, pod.Spec.NodeName)
}
}
@@ -482,7 +495,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
// corresponding volume in the actual state of the world to indicate that it is
// mounted.
func (adc *attachDetachController) processVolumesInUse(
nodeName string, volumesInUse []api.UniqueDeviceName) {
nodeName string, volumesInUse []api.UniqueVolumeName) {
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
mounted := false
for _, volumeInUse := range volumesInUse {
@@ -501,11 +514,6 @@ func (adc *attachDetachController) processVolumesInUse(
}
}
// getUniquePodName returns a unique name to reference pod by in memory caches
func getUniquePodName(pod *api.Pod) string {
return types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}.String()
}
// VolumeHost implementation
// This is an unfortunate requirement of the current factoring of volume plugin
// initializing code. It requires kubelet specific methods used by the mounting
@@ -551,3 +559,11 @@ func (adc *attachDetachController) GetWriter() io.Writer {
func (adc *attachDetachController) GetHostName() string {
return ""
}
func (adc *attachDetachController) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("GetHostIP() not supported by Attach/Detach controller's VolumeHost implementation")
}
func (adc *attachDetachController) GetRootContext() string {
return ""
}

View File

@@ -1,195 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package attacherdetacher implements interfaces that enable triggering attach
// and detach operations on volumes.
package attacherdetacher
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/volume"
)
// AttacherDetacher defines a set of operations for attaching or detaching a
// volume from a node.
type AttacherDetacher interface {
// Spawns a new goroutine to execute volume-specific logic to attach the
// volume to the node specified in the volumeToAttach.
// Once attachment completes successfully, the actualStateOfWorld is updated
// to indicate the volume is attached to the node.
// If there is an error indicating the volume is already attached to the
// specified node, attachment is assumed to be successful (plugins are
// responsible for implmenting this behavior).
// All other errors are logged and the goroutine terminates without updating
// actualStateOfWorld (caller is responsible for retrying as needed).
AttachVolume(volumeToAttach cache.VolumeToAttach, actualStateOfWorld cache.ActualStateOfWorld) error
// Spawns a new goroutine to execute volume-specific logic to detach the
// volume from the node specified in volumeToDetach.
// Once detachment completes successfully, the actualStateOfWorld is updated
// to remove the volume/node combo.
// If there is an error indicating the volume is already detached from the
// specified node, detachment is assumed to be successful (plugins are
// responsible for implmenting this behavior).
// All other errors are logged and the goroutine terminates without updating
// actualStateOfWorld (caller is responsible for retrying as needed).
DetachVolume(volumeToDetach cache.AttachedVolume, actualStateOfWorld cache.ActualStateOfWorld) error
}
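For illustration, a hedged sketch of how a reconciliation loop drove this interface before its replacement by operationexecutor; the plugin manager and the two state caches are assumed to come from the surrounding controller:

ad := NewAttacherDetacher(volumePluginMgr)
for _, volumeToAttach := range desiredStateOfWorld.GetVolumesToAttach() {
    if !actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
        // Returns quickly; the attach itself runs on a spawned goroutine.
        if err := ad.AttachVolume(volumeToAttach, actualStateOfWorld); err != nil {
            glog.V(5).Infof("AttachVolume not started: %v", err)
        }
    }
}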
// NewAttacherDetacher returns a new instance of AttacherDetacher.
func NewAttacherDetacher(volumePluginMgr *volume.VolumePluginMgr) AttacherDetacher {
return &attacherDetacher{
volumePluginMgr: volumePluginMgr,
pendingOperations: goroutinemap.NewGoRoutineMap(),
}
}
type attacherDetacher struct {
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
// pendingOperations keeps track of pending attach and detach operations so
// multiple operations are not started on the same volume
pendingOperations goroutinemap.GoRoutineMap
}
func (ad *attacherDetacher) AttachVolume(
volumeToAttach cache.VolumeToAttach,
actualStateOfWorld cache.ActualStateOfWorld) error {
attachFunc, err := ad.generateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
if err != nil {
return err
}
return ad.pendingOperations.Run(string(volumeToAttach.VolumeName), attachFunc)
}
func (ad *attacherDetacher) DetachVolume(
volumeToDetach cache.AttachedVolume,
actualStateOfWorld cache.ActualStateOfWorld) error {
detachFunc, err := ad.generateDetachVolumeFunc(volumeToDetach, actualStateOfWorld)
if err != nil {
return err
}
return ad.pendingOperations.Run(string(volumeToDetach.VolumeName), detachFunc)
}
func (ad *attacherDetacher) generateAttachVolumeFunc(
volumeToAttach cache.VolumeToAttach,
actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
// Get attacher plugin
attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
if err != nil || attachableVolumePlugin == nil {
return nil, fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeToAttach.VolumeSpec.Name(),
err)
}
volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
if newAttacherErr != nil {
return nil, fmt.Errorf(
"failed to get NewAttacher from volumeSpec for volume %q err=%v",
volumeToAttach.VolumeSpec.Name(),
newAttacherErr)
}
return func() error {
// Execute attach
attachErr := volumeAttacher.Attach(volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
if attachErr != nil {
// On failure, just log and exit. The controller will retry
glog.Errorf(
"Attach operation for device %q to node %q failed with: %v",
volumeToAttach.VolumeName, volumeToAttach.NodeName, attachErr)
return attachErr
}
glog.Infof(
"Successfully attached device %q to node %q. Will update actual state of world.",
volumeToAttach.VolumeName, volumeToAttach.NodeName)
// Update actual state of world
_, addVolumeNodeErr := actualStateOfWorld.AddVolumeNode(volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
if addVolumeNodeErr != nil {
// On failure, just log and exit. The controller will retry
glog.Errorf(
"Attach operation for device %q to node %q succeeded, but updating actualStateOfWorld failed with: %v",
volumeToAttach.VolumeName, volumeToAttach.NodeName, addVolumeNodeErr)
return addVolumeNodeErr
}
return nil
}, nil
}
func (ad *attacherDetacher) generateDetachVolumeFunc(
volumeToDetach cache.AttachedVolume,
actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
// Get attacher plugin
attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec)
if err != nil || attachableVolumePlugin == nil {
return nil, fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeToDetach.VolumeSpec.Name(),
err)
}
deviceName, err := attachableVolumePlugin.GetDeviceName(volumeToDetach.VolumeSpec)
if err != nil {
return nil, fmt.Errorf(
"failed to GetDeviceName from AttachablePlugin for volumeSpec %q err=%v",
volumeToDetach.VolumeSpec.Name(),
err)
}
volumeDetacher, err := attachableVolumePlugin.NewDetacher()
if err != nil {
return nil, fmt.Errorf(
"failed to get NewDetacher from volumeSpec for volume %q err=%v",
volumeToDetach.VolumeSpec.Name(),
err)
}
return func() error {
// Execute detach
detachErr := volumeDetacher.Detach(deviceName, volumeToDetach.NodeName)
if detachErr != nil {
// On failure, just log and exit. The controller will retry
glog.Errorf(
"Detach operation for device %q from node %q failed with: %v",
volumeToDetach.VolumeName, volumeToDetach.NodeName, detachErr)
return detachErr
}
glog.Infof(
"Successfully detached device %q from node %q. Will update actual state of world.",
volumeToDetach.VolumeName, volumeToDetach.NodeName)
// Update actual state of world
actualStateOfWorld.DeleteVolumeNode(volumeToDetach.VolumeName, volumeToDetach.NodeName)
return nil
}, nil
}
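The pendingOperations map is what serializes work per volume: while an operation keyed by a volume name is in flight, a second Run with the same key fails fast instead of spawning a duplicate goroutine. A minimal sketch of that behavior (the key value is hypothetical):

ops := goroutinemap.NewGoRoutineMap()
_ = ops.Run("kubernetes.io/gce-pd/my-disk", func() error {
    return nil // long-running attach or detach work happens here
})
// While the first operation is still running, a second Run with the
// same key fails with an "already exists" error.
if err := ops.Run("kubernetes.io/gce-pd/my-disk", func() error { return nil }); goroutinemap.IsAlreadyExists(err) {
    glog.V(5).Infof("operation already pending: %v", err)
}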

View File

@ -28,17 +28,25 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/attachdetach"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// ActualStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's actual state of the world cache.
// This cache contains volumes->nodes i.e. a set of all volumes and the nodes
// the attach/detach controller believes are successfully attached.
// Note: This is distinct from the ActualStateOfWorld implemented by the kubelet
// volume manager. They both keep track of different objects. This contains
// attach/detach controller specific state.
type ActualStateOfWorld interface {
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldAttacherUpdater
// AddVolumeNode adds the given volume and node to the underlying store
// indicating the specified volume is attached to the specified node.
// A unique volumeName is generated from the volumeSpec and returned on
// A unique volume name is generated from the volumeSpec and returned on
// success.
// If the volume/node combo already exists, the detachRequestedTime is reset
// to zero.
@ -47,7 +55,7 @@ type ActualStateOfWorld interface {
// added.
// If no node with the name nodeName exists in the list of attached nodes for
// the specified volume, the node is added.
AddVolumeNode(volumeSpec *volume.Spec, nodeName string) (api.UniqueDeviceName, error)
AddVolumeNode(volumeSpec *volume.Spec, nodeName string, devicePath string) (api.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true this value indicates the volume is mounted by
@ -56,7 +64,7 @@ type ActualStateOfWorld interface {
// returned.
// If no node with the name nodeName exists in the list of attached nodes for
// the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName api.UniqueDeviceName, nodeName string, mounted bool) error
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName string, mounted bool) error
// MarkDesireToDetach returns the difference between the current time and
// the DetachRequestedTime for the given volume/node combo. If the
@ -65,7 +73,14 @@ type ActualStateOfWorld interface {
// returned.
// If no node with the name nodeName exists in the list of attached nodes for
// the specified volume, an error is returned.
MarkDesireToDetach(volumeName api.UniqueDeviceName, nodeName string) (time.Duration, error)
MarkDesireToDetach(volumeName api.UniqueVolumeName, nodeName string) (time.Duration, error)
// ResetNodeStatusUpdateNeeded resets statusUpdateNeeded for the specified
// node to false indicating the AttachedVolume field of the Node's Status
// object has been updated.
// If no node with the name nodeName exists in the list of attached nodes for
// the specified volume, an error is returned.
ResetNodeStatusUpdateNeeded(nodeName string) error
// DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the
@ -73,12 +88,12 @@ type ActualStateOfWorld interface {
// If the volume/node combo does not exist, this is a no-op.
// If after deleting the node, the specified volume contains no other child
// nodes, the volume is also deleted.
DeleteVolumeNode(volumeName api.UniqueDeviceName, nodeName string)
DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName string)
// VolumeNodeExists returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to
// the specified node.
VolumeNodeExists(volumeName api.UniqueDeviceName, nodeName string) bool
VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName string) bool
// GetAttachedVolumes generates and returns a list of volume/node pairs
// reflecting which volumes are attached to which nodes based on the
@ -89,19 +104,20 @@ type ActualStateOfWorld interface {
// the specified node reflecting which volumes are attached to that node
// based on the current actual state of the world.
GetAttachedVolumesForNode(nodeName string) []AttachedVolume
// GetVolumesToReportAttached returns a map containing the set of nodes for
// which the VolumesAttached Status field in the Node API object should be
// updated. The key in this map is the name of the node to update and the
// value is list of volumes that should be reported as attached (note that
// this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[string][]api.AttachedVolume
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
// VolumeName is the unique identifier for the volume that is attached.
VolumeName api.UniqueDeviceName
// VolumeSpec is the volume spec containing the specification for the
// volume that is attached.
VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume is attached to.
NodeName string
operationexecutor.AttachedVolume
// MountedByNode indicates that this volume has been been mounted by the
// node and is unsafe to detach.
@ -119,8 +135,9 @@ type AttachedVolume struct {
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
attachedVolumes: make(map[api.UniqueDeviceName]attachedVolume),
volumePluginMgr: volumePluginMgr,
attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume),
nodesToUpdateStatusFor: make(map[string]nodeToUpdateStatusFor),
volumePluginMgr: volumePluginMgr,
}
}
@ -129,18 +146,26 @@ type actualStateOfWorld struct {
// controller believes to be successfully attached to the nodes it is
// managing. The key in this map is the name of the volume and the value is
// an object containing more information about the attached volume.
attachedVolumes map[api.UniqueDeviceName]attachedVolume
attachedVolumes map[api.UniqueVolumeName]attachedVolume
// nodesToUpdateStatusFor is a map containing the set of nodes for which to
// update the VolumesAttached Status field. The key in this map is the name
// of the node and the value is an object containing more information about
// the node (including the list of volumes to report attached).
nodesToUpdateStatusFor map[string]nodeToUpdateStatusFor
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// The volume object represents a volume that the attach/detach controller
// believes to be succesfully attached to a node it is managing.
// believes to be successfully attached to a node it is managing.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName api.UniqueDeviceName
volumeName api.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
@ -152,9 +177,12 @@ type attachedVolume struct {
// node and the value is a node object containing more information about
// the node.
nodesAttachedTo map[string]nodeAttachedTo
// devicePath contains the path on the node where the volume is attached
devicePath string
}
// The nodeAttachedTo object represents a node that .
// The nodeAttachedTo object represents a node that has volumes attached to it.
type nodeAttachedTo struct {
// nodeName contains the name of this node.
nodeName string
@ -173,8 +201,41 @@ type nodeAttachedTo struct {
detachRequestedTime time.Time
}
// nodeToUpdateStatusFor is an object that reflects a node that has one or more
// volume attached. It keeps track of the volumes that should be reported as
// attached in the Node's Status API object.
type nodeToUpdateStatusFor struct {
// nodeName contains the name of this node.
nodeName string
// statusUpdateNeeded indicates that the value of the VolumesAttached field
// in the Node's Status API object should be updated. This should be set to
// true whenever a volume is added or deleted from
// volumesToReportAsAttached. It should be reset whenever the status is
// updated.
statusUpdateNeeded bool
// volumesToReportAsAttached is the list of volumes that should be reported
// as attached in the Node's status (note that this may differ from the
// actual list of attached volumes since volumes should be removed from this
// list as soon as a detach operation is considered, before the detach
// operation is triggered).
volumesToReportAsAttached map[api.UniqueVolumeName]api.UniqueVolumeName
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
volumeSpec *volume.Spec, nodeName string, devicePath string) error {
_, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
return err
}
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName api.UniqueVolumeName, nodeName string) {
asw.DeleteVolumeNode(volumeName, nodeName)
}
func (asw *actualStateOfWorld) AddVolumeNode(
volumeSpec *volume.Spec, nodeName string) (api.UniqueDeviceName, error) {
volumeSpec *volume.Spec, nodeName string, devicePath string) (api.UniqueVolumeName, error) {
asw.Lock()
defer asw.Unlock()
@ -186,11 +247,11 @@ func (asw *actualStateOfWorld) AddVolumeNode(
err)
}
volumeName, err := attachdetach.GetUniqueDeviceNameFromSpec(
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueDeviceNameFromSpec for volumeSpec %q err=%v",
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
@ -201,6 +262,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
volumeName: volumeName,
spec: volumeSpec,
nodesAttachedTo: make(map[string]nodeAttachedTo),
devicePath: devicePath,
}
asw.attachedVolumes[volumeName] = volumeObj
}
@ -220,11 +282,29 @@ func (asw *actualStateOfWorld) AddVolumeNode(
volumeObj.nodesAttachedTo[nodeName] = nodeObj
}
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists {
// Create object if it doesn't exist
nodeToUpdate = nodeToUpdateStatusFor{
nodeName: nodeName,
statusUpdateNeeded: true,
volumesToReportAsAttached: make(map[api.UniqueVolumeName]api.UniqueVolumeName),
}
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
}
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if !nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
}
return volumeName, nil
}
func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeName api.UniqueDeviceName, nodeName string, mounted bool) error {
volumeName api.UniqueVolumeName, nodeName string, mounted bool) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
@ -262,7 +342,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
}
func (asw *actualStateOfWorld) MarkDesireToDetach(
volumeName api.UniqueDeviceName, nodeName string) (time.Duration, error) {
volumeName api.UniqueVolumeName, nodeName string) (time.Duration, error) {
asw.Lock()
defer asw.Unlock()
@ -287,11 +367,40 @@ func (asw *actualStateOfWorld) MarkDesireToDetach(
volumeObj.nodesAttachedTo[nodeName] = nodeObj
}
// Remove volume from volumes to report as attached
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists {
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
delete(nodeToUpdate.volumesToReportAsAttached, volumeName)
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
}
}
return time.Since(volumeObj.nodesAttachedTo[nodeName].detachRequestedTime), nil
}
func (asw *actualStateOfWorld) ResetNodeStatusUpdateNeeded(
nodeName string) error {
asw.Lock()
defer asw.Unlock()
// Reset the statusUpdateNeeded flag for the specified node
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists {
return fmt.Errorf(
"failed to ResetNodeStatusUpdateNeeded(nodeName=%q) nodeName does not exist",
nodeName)
}
nodeToUpdate.statusUpdateNeeded = false
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
return nil
}
func (asw *actualStateOfWorld) DeleteVolumeNode(
volumeName api.UniqueDeviceName, nodeName string) {
volumeName api.UniqueVolumeName, nodeName string) {
asw.Lock()
defer asw.Unlock()
@ -308,10 +417,22 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
if len(volumeObj.nodesAttachedTo) == 0 {
delete(asw.attachedVolumes, volumeName)
}
// Remove volume from volumes to report as attached
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists {
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
delete(nodeToUpdate.volumesToReportAsAttached, volumeName)
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
}
}
}
func (asw *actualStateOfWorld) VolumeNodeExists(
volumeName api.UniqueDeviceName, nodeName string) bool {
volumeName api.UniqueVolumeName, nodeName string) bool {
asw.RLock()
defer asw.RUnlock()
@ -330,16 +451,11 @@ func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
defer asw.RUnlock()
attachedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for volumeName, volumeObj := range asw.attachedVolumes {
for nodeName, nodeObj := range volumeObj.nodesAttachedTo {
for _, volumeObj := range asw.attachedVolumes {
for _, nodeObj := range volumeObj.nodesAttachedTo {
attachedVolumes = append(
attachedVolumes,
AttachedVolume{
NodeName: nodeName,
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
MountedByNode: nodeObj.mountedByNode,
DetachRequestedTime: nodeObj.detachRequestedTime})
getAttachedVolume(&volumeObj, &nodeObj))
}
}
@ -353,20 +469,54 @@ func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
attachedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for volumeName, volumeObj := range asw.attachedVolumes {
for _, volumeObj := range asw.attachedVolumes {
for actualNodeName, nodeObj := range volumeObj.nodesAttachedTo {
if actualNodeName == nodeName {
attachedVolumes = append(
attachedVolumes,
AttachedVolume{
NodeName: nodeName,
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
MountedByNode: nodeObj.mountedByNode,
DetachRequestedTime: nodeObj.detachRequestedTime})
getAttachedVolume(&volumeObj, &nodeObj))
}
}
}
return attachedVolumes
}
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.AttachedVolume {
asw.RLock()
defer asw.RUnlock()
volumesToReportAttached := make(map[string][]api.AttachedVolume)
for _, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make(
[]api.AttachedVolume,
len(nodeToUpdateObj.volumesToReportAsAttached) /* len */)
i := 0
for _, volume := range nodeToUpdateObj.volumesToReportAsAttached {
attachedVolumes[i] = api.AttachedVolume{
Name: volume,
DevicePath: asw.attachedVolumes[volume].devicePath,
}
i++
}
volumesToReportAttached[nodeToUpdateObj.nodeName] = attachedVolumes
}
}
return volumesToReportAttached
}
func getAttachedVolume(
attachedVolume *attachedVolume,
nodeAttachedTo *nodeAttachedTo) AttachedVolume {
return AttachedVolume{
AttachedVolume: operationexecutor.AttachedVolume{
VolumeName: attachedVolume.volumeName,
VolumeSpec: attachedVolume.spec,
NodeName: nodeAttachedTo.nodeName,
PluginIsAttachable: true,
},
MountedByNode: nodeAttachedTo.mountedByNode,
DetachRequestedTime: nodeAttachedTo.detachRequestedTime}
}

View File

@ -27,7 +27,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/attachdetach"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
@ -36,6 +38,9 @@ import (
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// kubelet volume manager. They both keep track of different objects. This
// contains attach/detach controller specific state.
type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/
// detach controller.
@ -52,7 +57,7 @@ type DesiredStateOfWorld interface {
// should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in the list of nodes managed by the
// attach/detach controller, an error is returned.
AddPod(podName string, volumeSpec *volume.Spec, nodeName string) (api.UniqueDeviceName, error)
AddPod(podName types.UniquePodName, volumeSpec *volume.Spec, nodeName string) (api.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller.
@ -70,7 +75,7 @@ type DesiredStateOfWorld interface {
// volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePod(podName string, volumeName api.UniqueDeviceName, nodeName string)
DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName string)
// NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller.
@ -79,7 +84,7 @@ type DesiredStateOfWorld interface {
// VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by
// the attach detach controller.
VolumeExists(volumeName api.UniqueDeviceName, nodeName string) bool
VolumeExists(volumeName api.UniqueVolumeName, nodeName string) bool
// GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired
@ -89,17 +94,7 @@ type DesiredStateOfWorld interface {
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
// VolumeName is the unique identifier for the volume that should be
// attached.
VolumeName api.UniqueDeviceName
// VolumeSpec is a volume spec containing the specification for the volume
// that should be attached.
VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume should be
// attached to.
NodeName string
operationexecutor.VolumeToAttach
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
@ -130,13 +125,13 @@ type nodeManaged struct {
// volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and
// the value is a volume object containing more information about the volume.
volumesToAttach map[api.UniqueDeviceName]volumeToAttach
volumesToAttach map[api.UniqueVolumeName]volumeToAttach
}
// The volume object represents a volume that should be attached to a node.
type volumeToAttach struct {
// volumeName contains the unique identifier for this volume.
volumeName api.UniqueDeviceName
volumeName api.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
@ -147,14 +142,14 @@ type volumeToAttach struct {
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
scheduledPods map[string]pod
scheduledPods map[types.UniquePodName]pod
}
// The pod object represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
// podName contains the name of this pod.
podName string
podName types.UniquePodName
}
func (dsw *desiredStateOfWorld) AddNode(nodeName string) {
@ -164,15 +159,15 @@ func (dsw *desiredStateOfWorld) AddNode(nodeName string) {
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName,
volumesToAttach: make(map[api.UniqueDeviceName]volumeToAttach),
volumesToAttach: make(map[api.UniqueVolumeName]volumeToAttach),
}
}
}
func (dsw *desiredStateOfWorld) AddPod(
podName string,
podName types.UniquePodName,
volumeSpec *volume.Spec,
nodeName string) (api.UniqueDeviceName, error) {
nodeName string) (api.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
@ -191,11 +186,11 @@ func (dsw *desiredStateOfWorld) AddPod(
err)
}
volumeName, err := attachdetach.GetUniqueDeviceNameFromSpec(
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GenerateUniqueDeviceName for volumeSpec %q err=%v",
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
@ -205,7 +200,7 @@ func (dsw *desiredStateOfWorld) AddPod(
volumeObj = volumeToAttach{
volumeName: volumeName,
spec: volumeSpec,
scheduledPods: make(map[string]pod),
scheduledPods: make(map[types.UniquePodName]pod),
}
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
@ -243,8 +238,8 @@ func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error {
}
func (dsw *desiredStateOfWorld) DeletePod(
podName string,
volumeName api.UniqueDeviceName,
podName types.UniquePodName,
volumeName api.UniqueVolumeName,
nodeName string) {
dsw.Lock()
defer dsw.Unlock()
@ -282,7 +277,7 @@ func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool {
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName api.UniqueDeviceName, nodeName string) bool {
volumeName api.UniqueVolumeName, nodeName string) bool {
dsw.RLock()
defer dsw.RUnlock()
@ -303,7 +298,12 @@ func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
volumesToAttach = append(volumesToAttach, VolumeToAttach{NodeName: nodeName, VolumeName: volumeName, VolumeSpec: volumeObj.spec})
volumesToAttach = append(volumesToAttach,
VolumeToAttach{
VolumeToAttach: operationexecutor.VolumeToAttach{
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
NodeName: nodeName}})
}
}
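Likewise, a hedged sketch of the updated desired-state API; all identifiers are illustrative, and the constructor is assumed to mirror the actual-state one:

dsw := NewDesiredStateOfWorld(volumePluginMgr)
dsw.AddNode("node-1")
// Pod names are now typed; the value here is illustrative only.
volumeName, err := dsw.AddPod(types.UniquePodName("default/nginx"), volumeSpec, "node-1")
if err != nil {
    glog.Errorf("AddPod failed: %v", err)
}
glog.V(4).Infof("volume %q desired on node-1", volumeName)
for _, v := range dsw.GetVolumesToAttach() {
    glog.Infof("volume %q should be attached to node %q", v.VolumeName, v.NodeName)
}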

View File

@ -23,13 +23,18 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/statusupdater"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world
// with the actual state of the world by triggering attach/detach operations.
// Note: This is distinct from the Reconciler implemented by the kubelet volume
// manager. This reconciles state for the attach/detach controller. That
// reconciles state for the kubelet volume manager.
type Reconciler interface {
// Starts running the reconciliation loop which executes periodically, checks
// if volumes that should be attached are attached and volumes that should
@ -52,13 +57,15 @@ func NewReconciler(
maxWaitForUnmountDuration time.Duration,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld,
attacherDetacher attacherdetacher.AttacherDetacher) Reconciler {
attacherDetacher operationexecutor.OperationExecutor,
nodeStatusUpdater statusupdater.NodeStatusUpdater) Reconciler {
return &reconciler{
loopPeriod: loopPeriod,
maxWaitForUnmountDuration: maxWaitForUnmountDuration,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
attacherDetacher: attacherDetacher,
nodeStatusUpdater: nodeStatusUpdater,
}
}
@ -67,7 +74,8 @@ type reconciler struct {
maxWaitForUnmountDuration time.Duration
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
attacherDetacher attacherdetacher.AttacherDetacher
attacherDetacher operationexecutor.OperationExecutor
nodeStatusUpdater statusupdater.NodeStatusUpdater
}
func (rc *reconciler) Run(stopCh <-chan struct{}) {
@ -85,10 +93,22 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
attachedVolume.VolumeName, attachedVolume.NodeName) {
// Volume exists in actual state of world but not desired
if !attachedVolume.MountedByNode {
glog.V(5).Infof("Attempting to start DetachVolume for volume %q to node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
err := rc.attacherDetacher.DetachVolume(attachedVolume, rc.actualStateOfWorld)
glog.V(5).Infof("Attempting to start DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, rc.actualStateOfWorld)
if err == nil {
glog.Infof("Started DetachVolume for volume %q to node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
glog.Infof("Started DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
}
if err != nil &&
!goroutinemap.IsAlreadyExists(err) &&
!goroutinemap.IsExponentialBackoff(err) {
// Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(
"operationExecutor.DetachVolume failed to start for volume %q (spec.Name: %q) from node %q with err: %v",
attachedVolume.VolumeName,
attachedVolume.VolumeSpec.Name(),
attachedVolume.NodeName,
err)
}
} else {
// If volume is not safe to detach (is mounted) wait a max amount of time before detaching any way.
@ -97,10 +117,22 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
glog.Errorf("Unexpected error actualStateOfWorld.MarkDesireToDetach(): %v", err)
}
if timeElapsed > rc.maxWaitForUnmountDuration {
glog.V(5).Infof("Attempting to start DetachVolume for volume %q to node %q. Volume is not safe to detach, but maxWaitForUnmountDuration expired.", attachedVolume.VolumeName, attachedVolume.NodeName)
err := rc.attacherDetacher.DetachVolume(attachedVolume, rc.actualStateOfWorld)
glog.V(5).Infof("Attempting to start DetachVolume for volume %q from node %q. Volume is not safe to detach, but maxWaitForUnmountDuration expired.", attachedVolume.VolumeName, attachedVolume.NodeName)
err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, rc.actualStateOfWorld)
if err == nil {
glog.Infof("Started DetachVolume for volume %q to node %q due to maxWaitForUnmountDuration expiry.", attachedVolume.VolumeName, attachedVolume.NodeName)
glog.Infof("Started DetachVolume for volume %q from node %q due to maxWaitForUnmountDuration expiry.", attachedVolume.VolumeName, attachedVolume.NodeName)
}
if err != nil &&
!goroutinemap.IsAlreadyExists(err) &&
!goroutinemap.IsExponentialBackoff(err) {
// Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(
"operationExecutor.DetachVolume failed to start (maxWaitForUnmountDuration expiry) for volume %q (spec.Name: %q) from node %q with err: %v",
attachedVolume.VolumeName,
attachedVolume.VolumeSpec.Name(),
attachedVolume.NodeName,
err)
}
}
}
@ -114,18 +146,36 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
// Volume/Node exists, touch it to reset detachRequestedTime
glog.V(12).Infof("Volume %q/Node %q is attached--touching.", volumeToAttach.VolumeName, volumeToAttach.NodeName)
_, err := rc.actualStateOfWorld.AddVolumeNode(
volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
volumeToAttach.VolumeSpec, volumeToAttach.NodeName, "" /* devicePath */)
if err != nil {
glog.Errorf("Unexpected error on actualStateOfWorld.AddVolumeNode(): %v", err)
}
} else {
// Volume/Node doesn't exist, spawn a goroutine to attach it
glog.V(5).Infof("Attempting to start AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
err := rc.attacherDetacher.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil {
glog.Infof("Started AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
if err != nil &&
!goroutinemap.IsAlreadyExists(err) &&
!goroutinemap.IsExponentialBackoff(err) {
// Ignore goroutinemap.IsAlreadyExists && goroutinemap.IsExponentialBackoff errors, they are expected.
// Log all other errors.
glog.Errorf(
"operationExecutor.AttachVolume failed to start for volume %q (spec.Name: %q) to node %q with err: %v",
volumeToAttach.VolumeName,
volumeToAttach.VolumeSpec.Name(),
volumeToAttach.NodeName,
err)
}
}
}
// Update Node Status
err := rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
glog.Infof("UpdateNodeStatuses failed with: %v", err)
}
}
}
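For illustration, a minimal wiring sketch; the periods are hypothetical and the collaborators are built elsewhere in the controller:

rc := NewReconciler(
    100*time.Millisecond, // loopPeriod
    6*time.Minute,        // maxWaitForUnmountDuration
    desiredStateOfWorld,
    actualStateOfWorld,
    operationExecutor,
    nodeStatusUpdater)
stopCh := make(chan struct{})
go rc.Run(stopCh)
// close(stopCh) when the controller shuts down.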

View File

@ -0,0 +1,127 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package statusupdater implements interfaces that enable updating the status
// of API objects.
package statusupdater
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/util/strategicpatch"
)
// NodeStatusUpdater defines a set of operations for updating the
// VolumesAttached field in the Node Status.
type NodeStatusUpdater interface {
// Gets a list of node statuses that should be updated from the actual state
// of the world and updates them.
UpdateNodeStatuses() error
}
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater(
kubeClient internalclientset.Interface,
nodeInformer framework.SharedInformer,
actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
return &nodeStatusUpdater{
actualStateOfWorld: actualStateOfWorld,
nodeInformer: nodeInformer,
kubeClient: kubeClient,
}
}
type nodeStatusUpdater struct {
kubeClient internalclientset.Interface
nodeInformer framework.SharedInformer
actualStateOfWorld cache.ActualStateOfWorld
}
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
for nodeName, attachedVolumes := range nodesToUpdate {
nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(nodeName)
if nodeObj == nil || !exists || err != nil {
return fmt.Errorf(
"failed to find node %q in NodeInformer cache. %v",
nodeName,
err)
}
node, ok := nodeObj.(*api.Node)
if !ok || node == nil {
return fmt.Errorf(
"failed to cast %q object %#v to Node",
nodeName,
nodeObj)
}
oldData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf(
"failed to Marshal oldData for node %q. %v",
nodeName,
err)
}
node.Status.VolumesAttached = attachedVolumes
newData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf(
"failed to Marshal newData for node %q. %v",
nodeName,
err)
}
patchBytes, err :=
strategicpatch.CreateStrategicMergePatch(oldData, newData, node)
if err != nil {
return fmt.Errorf(
"failed to CreateStrategicMergePatch for node %q. %v",
nodeName,
err)
}
_, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes)
if err != nil {
return fmt.Errorf(
"failed to kubeClient.Core().Nodes().Patch for node %q. %v",
nodeName,
err)
}
err = nsu.actualStateOfWorld.ResetNodeStatusUpdateNeeded(nodeName)
if err != nil {
return fmt.Errorf(
"failed to ResetNodeStatusUpdateNeeded for node %q. %v",
nodeName,
err)
}
glog.V(3).Infof(
"Updating status for node %q succeeded. patchBytes: %q",
nodeName,
string(patchBytes))
}
return nil
}
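A minimal wiring sketch, assuming the controller already holds a clientset, a shared node informer, and the actual-state cache:

nsu := NewNodeStatusUpdater(kubeClient, nodeInformer, actualStateOfWorld)
if err := nsu.UpdateNodeStatuses(); err != nil {
    glog.Infof("UpdateNodeStatuses failed with: %v", err)
}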

View File

@ -20,6 +20,7 @@ import (
"fmt"
"math"
"strconv"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
@ -27,10 +28,11 @@ import (
)
// formatMap formats map[string]string to a string.
func formatMap(m map[string]string) (fmtStr string) {
func FormatMap(m map[string]string) (fmtStr string) {
for key, value := range m {
fmtStr += fmt.Sprintf("%v=%q\n", key, value)
}
fmtStr = strings.TrimSuffix(fmtStr, "\n")
return
}
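For illustration (Go map iteration order is unspecified, so with multiple keys the line order may vary):

labels := map[string]string{"app": "nginx"}
// Prints: app="nginx" -- the trailing newline is trimmed above.
fmt.Println(FormatMap(labels))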
@ -51,9 +53,9 @@ func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error)
switch fieldPath {
case "metadata.annotations":
return formatMap(accessor.GetAnnotations()), nil
return FormatMap(accessor.GetAnnotations()), nil
case "metadata.labels":
return formatMap(accessor.GetLabels()), nil
return FormatMap(accessor.GetLabels()), nil
case "metadata.name":
return accessor.GetName(), nil
case "metadata.namespace":

View File

@ -696,7 +696,7 @@ func (s *GenericAPIServer) Run(options *options.ServerRunOptions) {
alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}
// It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless
// alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME")
if shouldGenSelfSignedCerts(options.TLSCertFile, options.TLSPrivateKeyFile) {
if crypto.ShouldGenSelfSignedCerts(options.TLSCertFile, options.TLSPrivateKeyFile) {
if err := crypto.GenerateSelfSignedCert(s.ClusterIP.String(), options.TLSCertFile, options.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil {
glog.Errorf("Unable to generate self signed cert: %v", err)
} else {
@ -735,28 +735,6 @@ func (s *GenericAPIServer) Run(options *options.ServerRunOptions) {
glog.Fatal(http.ListenAndServe())
}
// If the file represented by path exists and is
// readable, return true; otherwise return false.
func canReadFile(path string) bool {
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
return true
}
func shouldGenSelfSignedCerts(certPath, keyPath string) bool {
if canReadFile(certPath) || canReadFile(keyPath) {
glog.Infof("using existing apiserver.crt and apiserver.key files")
return false
}
return true
}
// Exposes the given group version in API.
func (s *GenericAPIServer) InstallAPIGroup(apiGroupInfo *APIGroupInfo) error {
apiPrefix := s.APIGroupPrefix

View File

@ -210,7 +210,7 @@ func (rl *respLogger) recordStatus(status int) {
rl.statusRecorded = true
if rl.logStacktracePred(status) {
// Only log stacks for errors
stack := make([]byte, 2048)
stack := make([]byte, 50*1024)
stack = stack[:runtime.Stack(stack, false)]
rl.statusStack = "\n" + string(stack)
} else {

View File

@ -39,7 +39,7 @@ type ContainerGCPolicy struct {
// Implementation is thread-compatible.
type ContainerGC interface {
// Garbage collect containers.
GarbageCollect() error
GarbageCollect(allSourcesReady bool) error
}
// TODO(vmarmol): Preferentially remove pod infra containers.
@ -63,6 +63,6 @@ func NewContainerGC(runtime Runtime, policy ContainerGCPolicy) (ContainerGC, err
}, nil
}
func (cgc *realContainerGC) GarbageCollect() error {
return cgc.runtime.GarbageCollect(cgc.policy)
func (cgc *realContainerGC) GarbageCollect(allSourcesReady bool) error {
return cgc.runtime.GarbageCollect(cgc.policy, allSourcesReady)
}

View File

@ -74,7 +74,12 @@ type Runtime interface {
// exited and dead containers (used for garbage collection).
GetPods(all bool) ([]*Pod, error)
// GarbageCollect removes dead containers using the specified container gc policy
GarbageCollect(gcPolicy ContainerGCPolicy) error
// If allSourcesReady is not true, it means that kubelet doesn't have the
// complete list of pods from all available sources (e.g., apiserver, http,
// file). In this case, the garbage collector should refrain from aggressive
// behavior such as removing all containers of unrecognized pods (yet).
// TODO: Revisit this method and make it cleaner.
GarbageCollect(gcPolicy ContainerGCPolicy, allSourcesReady bool) error
// Syncs the running pod into the desired pod.
SyncPod(pod *api.Pod, apiPodStatus api.PodStatus, podStatus *PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.

View File

@ -185,7 +185,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
}
// GarbageCollect removes dead containers using the specified container gc policy
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy) error {
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error {
// Separate containers by evict units.
evictUnits, unidentifiedContainers, err := cgc.evictableContainers(gcPolicy.MinAge)
if err != nil {
@ -201,11 +201,13 @@ func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy)
}
}
// Remove deleted pod containers.
for key, unit := range evictUnits {
if cgc.isPodDeleted(key.uid) {
cgc.removeOldestN(unit, len(unit)) // Remove all.
delete(evictUnits, key)
// Remove deleted pod containers if all sources are ready.
if allSourcesReady {
for key, unit := range evictUnits {
if cgc.isPodDeleted(key.uid) {
cgc.removeOldestN(unit, len(unit)) // Remove all.
delete(evictUnits, key)
}
}
}

View File

@ -23,6 +23,7 @@ import (
"path"
"strconv"
"strings"
"time"
dockerref "github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/jsonmessage"
@ -311,8 +312,11 @@ func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
// ConnectToDockerOrDie creates docker client connecting to docker daemon.
// If the endpoint passed in is "fake://", a fake docker client
// will be returned. The program exits if error occurs.
func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
// will be returned. The program exits if an error occurs. The requestTimeout
// is the timeout for docker requests. If the timeout is exceeded, the request
// is cancelled and an error is returned. If requestTimeout is 0, a default
// value will be applied.
func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout time.Duration) DockerInterface {
if dockerEndpoint == "fake://" {
return NewFakeDockerClient()
}
@ -320,7 +324,8 @@ func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
return newKubeDockerClient(client)
glog.Infof("Start docker client with request timeout=%v", requestTimeout)
return newKubeDockerClient(client, requestTimeout)
}
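A hedged usage sketch; the endpoint value is illustrative, and passing 0 falls back to the default timeout defined in the docker client below:

client := ConnectToDockerOrDie("unix:///var/run/docker.sock", 3*time.Minute)
info, err := client.Info()
if err != nil {
    glog.Fatalf("docker info failed: %v", err)
}
glog.Infof("connected to docker %s", info.ServerVersion)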
// milliCPUToQuota converts milliCPU to CFS quota and period values

View File

@ -49,14 +49,22 @@ import (
// TODO(random-liu): Switch to new docker interface by refactoring the functions in the old DockerInterface
// one by one.
type kubeDockerClient struct {
client *dockerapi.Client
// timeout is the timeout of short running docker operations.
timeout time.Duration
client *dockerapi.Client
}
// Make sure that kubeDockerClient implemented the DockerInterface.
var _ DockerInterface = &kubeDockerClient{}
// There are 2 kinds of docker operations categorized by running time:
// * Long running operation: The long running operation could run for an arbitrarily long time, and the running time
// usually depends on some uncontrollable factors. These operations include: PullImage, Logs, StartExec, AttachToContainer.
// * Non-long running operation: Given the maximum load of the system, the non-long running operation should finish
// in an expected and usually short time. These include all other operations.
// kubeDockerClient only applies the timeout to non-long running operations.
const (
// defaultTimeout is the default timeout of all docker operations.
// defaultTimeout is the default timeout of short running docker operations.
defaultTimeout = 2 * time.Minute
// defaultShmSize is the default ShmSize to use (in bytes) if not specified.
@ -69,20 +77,26 @@ const (
// is made for defaultImagePullingStuckTimeout, the image pulling will be cancelled.
// Docker reports image progress for every 512kB block, so normally there shouldn't be too long interval
// between progress updates.
// TODO(random-liu): Make this configurable
defaultImagePullingStuckTimeout = 1 * time.Minute
)
// newKubeDockerClient creates a kubeDockerClient from an existing docker client.
func newKubeDockerClient(dockerClient *dockerapi.Client) DockerInterface {
// newKubeDockerClient creates a kubeDockerClient from an existing docker client. If requestTimeout is 0,
// defaultTimeout will be applied.
func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout time.Duration) DockerInterface {
if requestTimeout == 0 {
requestTimeout = defaultTimeout
}
return &kubeDockerClient{
client: dockerClient,
client: dockerClient,
timeout: requestTimeout,
}
}
func (k *kubeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
ctx, cancel := getDefaultContext()
func (d *kubeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
containers, err := k.client.ContainerList(ctx, options)
containers, err := d.client.ContainerList(ctx, options)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
@ -93,7 +107,7 @@ func (k *kubeDockerClient) ListContainers(options dockertypes.ContainerListOptio
}
func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
containerJSON, err := d.client.ContainerInspect(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -109,7 +123,7 @@ func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS
}
func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
// we provide an explicit default shm size as to not depend on docker daemon.
// TODO: evaluate exposing this as a knob in the API
@ -127,7 +141,7 @@ func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfi
}
func (d *kubeDockerClient) StartContainer(id string) error {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
err := d.client.ContainerStart(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -138,7 +152,7 @@ func (d *kubeDockerClient) StartContainer(id string) error {
// Stopping an already stopped container will not cause an error in engine-api.
func (d *kubeDockerClient) StopContainer(id string, timeout int) error {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
err := d.client.ContainerStop(ctx, id, timeout)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -148,7 +162,7 @@ func (d *kubeDockerClient) StopContainer(id string, timeout int) error {
}
func (d *kubeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
err := d.client.ContainerRemove(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -158,7 +172,7 @@ func (d *kubeDockerClient) RemoveContainer(id string, opts dockertypes.Container
}
func (d *kubeDockerClient) InspectImage(image string) (*dockertypes.ImageInspect, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, _, err := d.client.ImageInspectWithRaw(ctx, image, true)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -174,7 +188,7 @@ func (d *kubeDockerClient) InspectImage(image string) (*dockertypes.ImageInspect
}
func (d *kubeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ImageHistory(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -184,7 +198,7 @@ func (d *kubeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory,
}
func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
images, err := d.client.ImageList(ctx, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -297,7 +311,7 @@ func (d *kubeDockerClient) PullImage(image string, auth dockertypes.AuthConfig,
return err
}
opts.RegistryAuth = base64Auth
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := d.getCancelableContext()
defer cancel()
resp, err := d.client.ImagePull(ctx, image, opts)
if err != nil {
@ -326,7 +340,7 @@ func (d *kubeDockerClient) PullImage(image string, auth dockertypes.AuthConfig,
}
func (d *kubeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ImageRemove(ctx, image, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -336,7 +350,7 @@ func (d *kubeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemov
}
func (d *kubeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getCancelableContext()
defer cancel()
resp, err := d.client.ContainerLogs(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -350,7 +364,7 @@ func (d *kubeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions
}
func (d *kubeDockerClient) Version() (*dockertypes.Version, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ServerVersion(ctx)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -363,7 +377,7 @@ func (d *kubeDockerClient) Version() (*dockertypes.Version, error) {
}
func (d *kubeDockerClient) Info() (*dockertypes.Info, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.Info(ctx)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -377,7 +391,7 @@ func (d *kubeDockerClient) Info() (*dockertypes.Info, error) {
// TODO(random-liu): Add unit test for exec and attach functions, just like what go-dockerclient did.
func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ContainerExecCreate(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -390,7 +404,7 @@ func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*
}
func (d *kubeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getCancelableContext()
defer cancel()
if opts.Detach {
err := d.client.ContainerExecStart(ctx, startExec, opts)
@ -414,7 +428,7 @@ func (d *kubeDockerClient) StartExec(startExec string, opts dockertypes.ExecStar
}
func (d *kubeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ContainerExecInspect(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -427,7 +441,7 @@ func (d *kubeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns
}
func (d *kubeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
ctx, cancel := getDefaultContext()
ctx, cancel := d.getCancelableContext()
defer cancel()
resp, err := d.client.ContainerAttach(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
@ -488,16 +502,23 @@ func (d *kubeDockerClient) holdHijackedConnection(tty bool, inputStream io.Reade
return nil
}
// getCancelableContext returns a new cancelable context. For long running requests without timeout, we use a cancelable
// context to avoid a potential resource leak, although the current implementation shouldn't leak resources.
func (d *kubeDockerClient) getCancelableContext() (context.Context, context.CancelFunc) {
return context.WithCancel(context.Background())
}
// getTimeoutContext returns a new context with default request timeout
func (d *kubeDockerClient) getTimeoutContext() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), d.timeout)
}
// parseDockerTimestamp parses the timestamp returned by DockerInterface from string to time.Time
func parseDockerTimestamp(s string) (time.Time, error) {
// Timestamp returned by Docker is in time.RFC3339Nano format.
return time.Parse(time.RFC3339Nano, s)
}
func getDefaultContext() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), defaultTimeout)
}
// contextError checks the context, and returns error if the context is timeout.
func contextError(ctx context.Context) error {
if ctx.Err() == context.DeadlineExceeded {

View File

@ -991,10 +991,10 @@ func (dm *DockerManager) getSecurityOpt(pod *api.Pod, ctrName string) ([]string,
return nil, nil
}
profile, profileOK := pod.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+ctrName]
profile, profileOK := pod.ObjectMeta.Annotations[api.SeccompContainerAnnotationKeyPrefix+ctrName]
if !profileOK {
// try the pod profile
profile, profileOK = pod.ObjectMeta.Annotations["seccomp.security.alpha.kubernetes.io/pod"]
profile, profileOK = pod.ObjectMeta.Annotations[api.SeccompPodAnnotationKey]
if !profileOK {
// return early the default
return defaultSecurityOpt, nil
@ -1015,9 +1015,11 @@ func (dm *DockerManager) getSecurityOpt(pod *api.Pod, ctrName string) ([]string,
return nil, fmt.Errorf("unknown seccomp profile option: %s", profile)
}
file, err := ioutil.ReadFile(filepath.Join(dm.seccompProfileRoot, strings.TrimPrefix(profile, "localhost/")))
name := strings.TrimPrefix(profile, "localhost/") // by pod annotation validation, name is a valid subpath
fname := filepath.Join(dm.seccompProfileRoot, filepath.FromSlash(name))
file, err := ioutil.ReadFile(fname)
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot load seccomp profile %q: %v", name, err)
}
b := bytes.NewBuffer(nil)
@ -1976,7 +1978,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
podInfraContainerID, err, msg = dm.createPodInfraContainer(pod)
if err != nil {
startContainerResult.Fail(err, msg)
glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, format.Pod(pod))
glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q: %s", err, format.Pod(pod), msg)
return
}
@ -2352,8 +2354,8 @@ func (dm *DockerManager) GetNetNS(containerID kubecontainer.ContainerID) (string
}
// Garbage collection of dead containers
func (dm *DockerManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy) error {
return dm.containerGC.GarbageCollect(gcPolicy)
func (dm *DockerManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error {
return dm.containerGC.GarbageCollect(gcPolicy, allSourcesReady)
}
func (dm *DockerManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {

4
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/OWNERS generated vendored Normal file
View File

@ -0,0 +1,4 @@
assignees:
- derekwaynecarr
- vishh
- dchen1107

View File

@ -71,6 +71,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volume"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/types"
@ -92,7 +93,7 @@ import (
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume"
attachdetachutil "k8s.io/kubernetes/pkg/volume/util/attachdetach"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@ -151,9 +152,6 @@ const (
// Period for performing image garbage collection.
ImageGCPeriod = 5 * time.Minute
// Maximum period to wait for pod volume setup operations
maxWaitForVolumeOps = 20 * time.Minute
// maxImagesInNodeStatus is the maximum number of images we store in the node status.
maxImagesInNodeStatus = 50
)
@ -208,6 +206,7 @@ func NewMainKubelet(
osInterface kubecontainer.OSInterface,
cgroupRoot string,
containerRuntime string,
runtimeRequestTimeout time.Duration,
rktPath string,
rktAPIEndpoint string,
rktStage1Image string,
@ -299,8 +298,6 @@ func NewMainKubelet(
}
containerRefManager := kubecontainer.NewRefManager()
volumeManager := newVolumeManager()
oomWatcher := NewOOMWatcher(cadvisorInterface, recorder)
// TODO: remove when internal cbr0 implementation gets removed in favor
@ -333,7 +330,6 @@ func NewMainKubelet(
recorder: recorder,
cadvisor: cadvisorInterface,
diskSpaceManager: diskSpaceManager,
volumeManager: volumeManager,
cloud: cloud,
nodeRef: nodeRef,
nodeLabels: nodeLabels,
@ -456,6 +452,7 @@ func NewMainKubelet(
kubecontainer.RealOS{},
imageBackOff,
serializeImagePulls,
runtimeRequestTimeout,
)
if err != nil {
return nil, err
@ -496,10 +493,19 @@ func NewMainKubelet(
containerRefManager,
recorder)
if err := klet.volumePluginMgr.InitPlugins(volumePlugins, &volumeHost{klet}); err != nil {
klet.volumePluginMgr, err =
NewInitializedVolumePluginMgr(klet, volumePlugins)
if err != nil {
return nil, err
}
klet.volumeManager, err = kubeletvolume.NewVolumeManager(
enableControllerAttachDetach,
hostname,
klet.podManager,
klet.kubeClient,
klet.volumePluginMgr)
runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
if err != nil {
return nil, err
@ -643,7 +649,7 @@ type Kubelet struct {
runtimeState *runtimeState
// Volume plugins.
volumePluginMgr volume.VolumePluginMgr
volumePluginMgr *volume.VolumePluginMgr
// Network plugin.
networkPlugin network.NetworkPlugin
@ -675,10 +681,12 @@ type Kubelet struct {
// Syncs pods statuses with apiserver; also used as a cache of statuses.
statusManager status.Manager
// Manager for the volume maps for the pods.
volumeManager *volumeManager
// VolumeManager runs a set of asynchronous loops that figure out which
// volumes need to be attached/mounted/unmounted/detached based on the pods
// scheduled on this node and makes it so.
volumeManager kubeletvolume.VolumeManager
//Cloud provider interface
// Cloud provider interface.
cloud cloudprovider.Interface
// Reference to this node.
@ -911,7 +919,7 @@ func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
// Starts garbage collection threads.
func (kl *Kubelet) StartGarbageCollection() {
go wait.Until(func() {
if err := kl.containerGC.GarbageCollect(); err != nil {
if err := kl.containerGC.GarbageCollect(kl.sourcesReady.AllReady()); err != nil {
glog.Errorf("Container garbage collection failed: %v", err)
}
}, ContainerGCPeriod, wait.NeverStop)
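
A simplified stand-in for the wait.Until pattern used here, relying only on the standard library: run a function on a fixed period until a stop channel closes. The real helper lives in k8s.io/kubernetes/pkg/util/wait:

    package main

    import (
        "fmt"
        "time"
    )

    // until is a simplified stand-in for wait.Until: it invokes f every period
    // until stopCh is closed.
    func until(f func(), period time.Duration, stopCh <-chan struct{}) {
        for {
            select {
            case <-stopCh:
                return
            default:
            }
            f()
            select {
            case <-stopCh:
                return
            case <-time.After(period):
            }
        }
    }

    func main() {
        stop := make(chan struct{})
        go until(func() { fmt.Println("collecting container garbage...") }, 50*time.Millisecond, stop)
        time.Sleep(120 * time.Millisecond)
        close(stop)
        time.Sleep(10 * time.Millisecond) // let the goroutine observe the close
    }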
@ -983,6 +991,9 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
kl.runtimeState.setInitError(err)
}
// Start volume manager
go kl.volumeManager.Run(wait.NeverStop)
if kl.kubeClient != nil {
// Start syncing node status immediately, this may set up things the runtime needs to run.
go wait.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, wait.NeverStop)
@ -1043,7 +1054,7 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
node.Annotations = make(map[string]string)
}
node.Annotations[attachdetachutil.ControllerManagedAnnotation] = "true"
node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
}
// @question: should this be placed after the call to the cloud provider, which also applies labels?
@ -1112,11 +1123,16 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
if err := kl.setNodeStatus(node); err != nil {
return nil, err
}
return node, nil
}
func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
if kl.cloud == nil || kl.flannelExperimentalOverlay {
// TODO: We should have a mechanism to say whether native cloud provider
// is used or whether we are using overlay networking. We should return
// true for cloud providers if they implement Routes() interface and
// we are not using overlay networking.
if kl.cloud == nil || kl.cloud.ProviderName() != "gce" || kl.flannelExperimentalOverlay {
return false
}
_, supported := kl.cloud.Routes()
@ -1143,6 +1159,7 @@ func (kl *Kubelet) registerWithApiserver() {
glog.Errorf("Unable to construct api.Node object for kubelet: %v", err)
continue
}
glog.V(2).Infof("Attempting to register node %s", node.Name)
if _, err := kl.kubeClient.Core().Nodes().Create(node); err != nil {
if !apierrors.IsAlreadyExists(err) {
@ -1405,23 +1422,21 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Cont
return nil, err
}
opts.Hostname = hostname
vol, ok := kl.volumeManager.GetVolumes(pod.UID)
if !ok {
return nil, fmt.Errorf("impossible: cannot find the mounted volumes for pod %q", format.Pod(pod))
}
podName := volumehelper.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = makePortMappings(container)
// Docker does not relabel volumes if the container is running
// in the host pid or ipc namespaces so the kubelet must
// relabel the volumes
if pod.Spec.SecurityContext != nil && (pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostPID) {
err = kl.relabelVolumes(pod, vol)
err = kl.relabelVolumes(pod, volumes)
if err != nil {
return nil, err
}
}
opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, vol)
opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
if err != nil {
return nil, err
}
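
The old code treated a missing volume map as a hard error; the new VolumeManager simply returns whatever is currently mounted for the pod. A minimal sketch of that lookup shape, with stand-in types for the real VolumeManager and VolumeMap:

    package main

    import "fmt"

    // volumeMap stands in for kubecontainer.VolumeMap: volume name -> host path.
    type volumeMap map[string]string

    // volumeManager stands in for the kubelet's VolumeManager; the real one is
    // backed by asynchronous reconciliation loops.
    type volumeManager struct {
        mounted map[string]volumeMap // unique pod name -> mounted volumes
    }

    // GetMountedVolumesForPod returns the (possibly empty) set of volumes
    // currently mounted for the pod; unlike the old API there is no error path.
    func (vm *volumeManager) GetMountedVolumesForPod(podName string) volumeMap {
        return vm.mounted[podName]
    }

    func main() {
        vm := &volumeManager{mounted: map[string]volumeMap{
            "default/nginx": {"data": "/var/lib/kubelet/pods/123/volumes/data"},
        }}
        fmt.Println(vm.GetMountedVolumesForPod("default/nginx"))
        fmt.Println(vm.GetMountedVolumesForPod("default/missing")) // map[], not an error
    }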
@ -1554,7 +1569,11 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain
return result, err
}
case envVar.ValueFrom.ResourceFieldRef != nil:
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, pod, container)
defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
if err != nil {
return result, err
}
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
if err != nil {
return result, err
}
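
The change routes resource-field env vars through defaultPodLimitsForDownwardApi first, so containers with no limit set see a defaulted value (node allocatable, in the real kubelet) rather than zero. A stand-in sketch of that defaulting step, with illustrative numbers:

    package main

    import "fmt"

    // defaultLimit mirrors the intent of defaultPodLimitsForDownwardApi: when a
    // container has no limit for a resource, substitute a node-level default
    // before exposing the value through the downward API.
    func defaultLimit(limits map[string]int64, resource string, nodeAllocatable int64) int64 {
        if v, ok := limits[resource]; ok {
            return v
        }
        return nodeAllocatable
    }

    func main() {
        limits := map[string]int64{"memory": 512 << 20} // no cpu limit set
        fmt.Println(defaultLimit(limits, "memory", 4<<30)) // 536870912
        fmt.Println(defaultLimit(limits, "cpu", 4000))     // 4000 (defaulted)
    }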
@ -1786,7 +1805,7 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
// * Create a mirror pod if the pod is a static pod, and does not
// already have a mirror pod
// * Create the data directories for the pod if they do not exist
// * Mount volumes and update the volume manager
// * Wait for volumes to attach/mount
// * Fetch the pull secrets for the pod
// * Call the container runtime's SyncPod callback
// * Update the traffic shaping for the pod's ingress and egress limits
@ -1893,9 +1912,12 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
return err
}
// Mount volumes and update the volume manager
podVolumes, err := kl.mountExternalVolumes(pod)
// Wait for volumes to attach/mount
defaultedPod, _, err := kl.defaultPodLimitsForDownwardApi(pod, nil)
if err != nil {
return err
}
if err := kl.volumeManager.WaitForAttachAndMount(defaultedPod); err != nil {
ref, errGetRef := api.GetReference(pod)
if errGetRef == nil && ref != nil {
kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
@ -1903,7 +1925,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
return err
}
}
kl.volumeManager.SetVolumes(pod.UID, podVolumes)
// Fetch the pull secrets for the pod
pullSecrets, err := kl.getPullSecretsForPod(pod)
@ -1967,56 +1988,16 @@ func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) {
return pullSecrets, nil
}
// resolveVolumeName returns the name of the persistent volume (PV) claimed by
// a persistent volume claim (PVC) or an error if the claim is not bound.
// Returns the volume's own name if it does not use a PVC.
func (kl *Kubelet) resolveVolumeName(pod *api.Pod, volume *api.Volume) (string, error) {
claimSource := volume.VolumeSource.PersistentVolumeClaim
if claimSource != nil {
// resolve real volume behind the claim
claim, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(claimSource.ClaimName)
if err != nil {
return "", fmt.Errorf("Cannot find claim %s/%s for volume %s", pod.Namespace, claimSource.ClaimName, volume.Name)
}
if claim.Status.Phase != api.ClaimBound {
return "", fmt.Errorf("Claim for volume %s/%s is not bound yet", pod.Namespace, claimSource.ClaimName)
}
// Use the real bound volume instead of PersistentVolume.Name
return claim.Spec.VolumeName, nil
}
return volume.Name, nil
}
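
The removed helper resolved a claim to its bound volume before the volume manager took over that bookkeeping. A condensed sketch of the resolution logic with stand-in types (the real code fetched the claim from the API server):

    package main

    import (
        "errors"
        "fmt"
    )

    // claim is a stand-in for the relevant slice of a PersistentVolumeClaim.
    type claim struct {
        phase      string // "Bound" once the claim has a volume
        volumeName string // name of the bound PersistentVolume
    }

    // resolveVolumeName mirrors the removed kubelet helper: if the pod volume is
    // backed by a claim, return the bound PV's name; otherwise keep the name as-is.
    func resolveVolumeName(volName string, c *claim) (string, error) {
        if c == nil {
            return volName, nil // not a PVC-backed volume
        }
        if c.phase != "Bound" {
            return "", errors.New("claim is not bound yet")
        }
        return c.volumeName, nil
    }

    func main() {
        name, err := resolveVolumeName("my-vol", &claim{phase: "Bound", volumeName: "pv-0042"})
        fmt.Println(name, err) // pv-0042 <nil>
    }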
// Stores all volumes defined by the set of pods into a map.
// It stores real volumes there, i.e. persistent volume claims are resolved
// to volumes that are bound to them.
// Keys for each entry are in the format (POD_ID)/(VOLUME_NAME)
func (kl *Kubelet) getDesiredVolumes(pods []*api.Pod) map[string]api.Volume {
desiredVolumes := make(map[string]api.Volume)
for _, pod := range pods {
for _, volume := range pod.Spec.Volumes {
volumeName, err := kl.resolveVolumeName(pod, &volume)
if err != nil {
glog.V(3).Infof("%v", err)
// Ignore the error and hope it's resolved next time
continue
}
identifier := path.Join(string(pod.UID), volumeName)
desiredVolumes[identifier] = volume
}
}
return desiredVolumes
}
// cleanupOrphanedPodDirs removes the volumes of pods that should not be
// running and that have no containers running.
func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
active := sets.NewString()
func (kl *Kubelet) cleanupOrphanedPodDirs(
pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
allPods := sets.NewString()
for _, pod := range pods {
active.Insert(string(pod.UID))
allPods.Insert(string(pod.UID))
}
for _, pod := range runningPods {
active.Insert(string(pod.ID))
allPods.Insert(string(pod.ID))
}
found, err := kl.listPodsFromDisk()
@ -2025,16 +2006,19 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubeco
}
errlist := []error{}
for _, uid := range found {
if active.Has(string(uid)) {
if allPods.Has(string(uid)) {
continue
}
if volumes, err := kl.getPodVolumes(uid); err != nil || len(volumes) != 0 {
glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up; err: %v, volumes: %v ", uid, err, volumes)
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
// If volumes have not been unmounted/detached, do not delete directory.
// Doing so may result in corruption of data.
glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up; err: %v", uid, err)
continue
}
glog.V(3).Infof("Orphaned pod %q found, removing", uid)
if err := os.RemoveAll(kl.getPodDir(uid)); err != nil {
glog.Infof("Failed to remove orphaned pod %q dir; err: %v", uid, err)
errlist = append(errlist, err)
}
}
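
The cleanup reduces to a set difference: pod directories found on disk, minus pods the kubelet still knows about, skipping any orphan whose volumes remain mounted. A minimal sketch with plain map-backed sets and hypothetical UIDs:

    package main

    import "fmt"

    func main() {
        // UIDs the kubelet still tracks (admitted or running pods).
        allPods := map[string]bool{"uid-1": true, "uid-2": true}
        // UIDs found as pod directories on disk.
        foundOnDisk := []string{"uid-1", "uid-2", "uid-3", "uid-4"}
        // Orphans with volumes still mounted must not be removed, since
        // deleting the directory could corrupt data.
        volumesStillMounted := map[string]bool{"uid-4": true}

        for _, uid := range foundOnDisk {
            if allPods[uid] {
                continue
            }
            if volumesStillMounted[uid] {
                fmt.Println("skipping orphan with mounted volumes:", uid)
                continue
            }
            fmt.Println("removing orphaned pod dir:", uid) // os.RemoveAll in the real code
        }
    }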
@ -2086,88 +2070,6 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
return nil
}
// Compares the map of current volumes to the map of desired volumes.
// If an active volume does not have a respective desired volume, clean it up.
// This method is blocking:
// 1) it talks to API server to find volumes bound to persistent volume claims
// 2) it talks to cloud to detach volumes
func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
desiredVolumes := kl.getDesiredVolumes(pods)
currentVolumes := kl.getPodVolumesFromDisk()
runningSet := sets.String{}
for _, pod := range runningPods {
runningSet.Insert(string(pod.ID))
}
for name, cleaner := range currentVolumes {
if _, ok := desiredVolumes[name]; !ok {
parts := strings.Split(name, "/")
if runningSet.Has(parts[0]) {
glog.Infof("volume %q, still has a container running (%q), skipping teardown", name, parts[0])
continue
}
//TODO (jonesdl) We should somehow differentiate between volumes that are supposed
//to be deleted and volumes that are leftover after a crash.
glog.V(3).Infof("Orphaned volume %q found, tearing down volume", name)
// TODO(yifan): Refactor this hacky string manipulation.
kl.volumeManager.DeleteVolumes(types.UID(parts[0]))
// Get path reference count
volumePath := cleaner.Unmounter.GetPath()
refs, err := mount.GetMountRefs(kl.mounter, volumePath)
if err != nil {
glog.Errorf("Could not get mount path references for %q: %v", volumePath, err)
}
//TODO (jonesdl) This should not block other kubelet synchronization procedures
err = cleaner.Unmounter.TearDown()
if err != nil {
glog.Errorf("Could not tear down volume %q at %q: %v", name, volumePath, err)
}
// volume is unmounted. some volumes also require detachment from the node.
if cleaner.Detacher != nil && len(refs) == 1 {
// There is a bug in this code, where len(refs) is zero in some
// cases, and so RemoveVolumeInUse sometimes never gets called.
// The Attach/Detach Refactor should fix this, in the mean time,
// the controller timeout for safe mount is set to 3 minutes, so
// it will still detach the volume.
detacher := *cleaner.Detacher
devicePath, _, err := mount.GetDeviceNameFromMount(kl.mounter, refs[0])
if err != nil {
glog.Errorf("Could not find device path %v", err)
}
if err = detacher.UnmountDevice(refs[0], kl.mounter); err != nil {
glog.Errorf("Could not unmount the global mount for %q: %v", name, err)
}
pdName := path.Base(refs[0])
if kl.enableControllerAttachDetach {
// Attach/Detach controller is enabled and this volume type
// implements a detacher
uniqueDeviceName := attachdetachutil.GetUniqueDeviceName(
cleaner.PluginName, pdName)
kl.volumeManager.RemoveVolumeInUse(
api.UniqueDeviceName(uniqueDeviceName))
} else {
// Attach/Detach controller is disabled
err = detacher.Detach(pdName, kl.hostname)
if err != nil {
glog.Errorf("Could not detach volume %q at %q: %v", name, volumePath, err)
}
}
go func() {
if err = detacher.WaitForDetach(devicePath, maxWaitForVolumeOps); err != nil {
glog.Errorf("Error while waiting for detach: %v", err)
}
}()
}
}
}
return nil
}
// pastActiveDeadline returns true if the pod has been active for more than
// ActiveDeadlineSeconds.
func (kl *Kubelet) pastActiveDeadline(pod *api.Pod) bool {
@ -2360,16 +2262,6 @@ func (kl *Kubelet) HandlePodCleanups() error {
// Note that we pass all pods (including terminated pods) to the function,
// so that we don't remove volumes associated with terminated but not yet
// deleted pods.
err = kl.cleanupOrphanedVolumes(allPods, runningPods)
if err != nil {
glog.Errorf("Failed cleaning up orphaned volumes: %v", err)
return err
}
// Remove any orphaned pod directories.
// Note that we pass all pods (including terminated pods) to the function,
// so that we don't remove directories associated with terminated but not yet
// deleted pods.
err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
if err != nil {
glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
@ -2459,21 +2351,23 @@ func hasHostPortConflicts(pods []*api.Pod) bool {
// handleOutOfDisk detects if pods can't fit due to lack of disk space.
func (kl *Kubelet) isOutOfDisk() bool {
outOfDockerDisk := false
outOfRootDisk := false
// Check disk space once globally and reject or accept all new pods.
withinBounds, err := kl.diskSpaceManager.IsRuntimeDiskSpaceAvailable()
// Assume enough space in case of errors.
if err == nil && !withinBounds {
outOfDockerDisk = true
if err != nil {
glog.Errorf("Failed to check if disk space is available for the runtime: %v", err)
} else if !withinBounds {
return true
}
withinBounds, err = kl.diskSpaceManager.IsRootDiskSpaceAvailable()
// Assume enough space in case of errors.
if err == nil && !withinBounds {
outOfRootDisk = true
if err != nil {
glog.Errorf("Failed to check if disk space is available on the root partition: %v", err)
} else if !withinBounds {
return true
}
return outOfDockerDisk || outOfRootDisk
return false
}
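
The refactor swaps the accumulate-flags shape for early returns, and a failed check is now logged and treated as "enough space". A compact sketch of that pattern with hypothetical checker functions:

    package main

    import (
        "errors"
        "fmt"
    )

    // isOutOfDisk returns true only when a check succeeds and reports
    // out-of-bounds; errors are logged and assumed to mean space is available.
    func isOutOfDisk(checks ...func() (bool, error)) bool {
        for _, check := range checks {
            withinBounds, err := check()
            if err != nil {
                fmt.Println("failed to check disk space:", err) // glog.Errorf in the real code
                continue
            }
            if !withinBounds {
                return true
            }
        }
        return false
    }

    func main() {
        runtimeOK := func() (bool, error) { return true, nil }
        rootErr := func() (bool, error) { return false, errors.New("statfs failed") }
        fmt.Println(isOutOfDisk(runtimeOK, rootErr)) // false: errors assume enough space
    }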
// matchesNodeSelector returns true if pod matches node's labels.
@ -2845,6 +2739,10 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.Pod
var cID string
cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName)
// if not found, check the init containers
if !found {
cStatus, found = api.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
}
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
}
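
A minimal sketch of the lookup-with-fallback added here, using stand-in status slices in place of the api types:

    package main

    import "fmt"

    type containerStatus struct {
        name, id string
    }

    // getStatus stands in for api.GetContainerStatus.
    func getStatus(statuses []containerStatus, name string) (containerStatus, bool) {
        for _, s := range statuses {
            if s.name == name {
                return s, true
            }
        }
        return containerStatus{}, false
    }

    func main() {
        regular := []containerStatus{{"app", "docker://aaa"}}
        inits := []containerStatus{{"setup", "docker://bbb"}}

        // Check regular containers first, then fall back to init containers.
        name := "setup"
        cStatus, found := getStatus(regular, name)
        if !found {
            cStatus, found = getStatus(inits, name)
        }
        if !found {
            fmt.Printf("container %q is not available\n", name)
            return
        }
        fmt.Println(cStatus.id) // docker://bbb
    }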

Some files were not shown because too many files have changed in this diff.