mirror of https://github.com/k3s-io/k3s.git
Remove BoundPods from Kubelet
parent 972a3b1998
commit 5d95e9e671
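The heart of this change is that the kubelet and its config sources now consume api.Pod directly instead of converting each incoming pod into an api.BoundPod. In the apiserver source, the per-pod api.Scheme.Convert step disappears; the source only stamps a dummy self link and forwards the whole list as one SET update. A minimal, self-contained sketch of that pattern, using local stand-in types rather than the real Kubernetes packages (Pod and PodUpdate here are simplified placeholders for api.Pod and kubelet.PodUpdate shown in the diff below):

package main

import "fmt"

// Pod and PodUpdate are simplified stand-ins for the api.Pod and
// kubelet.PodUpdate types that appear in the diff below.
type Pod struct {
	Name     string
	SelfLink string
}

type PodUpdate struct {
	Pods   []Pod
	Op     string
	Source string
}

// send mirrors the new newSourceApiserverFromLW behaviour: no BoundPod
// conversion, just a dummy self link per pod and one SET update for the list.
func send(objs []interface{}, updates chan<- interface{}) {
	var pods []Pod
	for _, o := range objs {
		pod := o.(*Pod)
		// Make a dummy self link so that references to this pod will work.
		pod.SelfLink = "/api/v1beta1/pods/" + pod.Name
		pods = append(pods, *pod)
	}
	updates <- PodUpdate{pods, "SET", "api"}
}

func main() {
	updates := make(chan interface{}, 1)
	send([]interface{}{&Pod{Name: "p"}}, updates)
	fmt.Printf("%+v\n", <-updates)
}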
@@ -39,12 +39,8 @@ func init() {
 *out = *in.Copy()
 return nil
 },
-// Convert ContainerManifest to BoundPod
-//
-// This function generates a dummy selfLink using the same method as the
-// boundPod registry, in order for the Kubelet to work with well-formed
-// boundPods during the integration test.
-func(in *ContainerManifest, out *BoundPod, s conversion.Scope) error {
+// Convert ContainerManifest to Pod
+func(in *ContainerManifest, out *Pod, s conversion.Scope) error {
 out.Spec.Containers = in.Containers
 out.Spec.Volumes = in.Volumes
 out.Spec.RestartPolicy = in.RestartPolicy
@@ -53,12 +49,12 @@ func init() {
 out.UID = in.UUID

 if in.ID != "" {
-out.SelfLink = "/api/v1beta1/boundPods/" + in.ID
+out.SelfLink = "/api/v1beta1/pods/" + in.ID
 }

 return nil
 },
-func(in *BoundPod, out *ContainerManifest, s conversion.Scope) error {
+func(in *Pod, out *ContainerManifest, s conversion.Scope) error {
 out.Containers = in.Spec.Containers
 out.Volumes = in.Spec.Volumes
 out.RestartPolicy = in.Spec.RestartPolicy
@@ -70,7 +66,7 @@ func init() {
 },

 // ContainerManifestList
-func(in *ContainerManifestList, out *BoundPods, s conversion.Scope) error {
+func(in *ContainerManifestList, out *PodList, s conversion.Scope) error {
 if err := s.Convert(&in.Items, &out.Items, 0); err != nil {
 return err
 }
@@ -80,7 +76,7 @@ func init() {
 }
 return nil
 },
-func(in *BoundPods, out *ContainerManifestList, s conversion.Scope) error {
+func(in *PodList, out *ContainerManifestList, s conversion.Scope) error {
 if err := s.Convert(&in.Items, &out.Items, 0); err != nil {
 return err
 }
@@ -44,7 +44,6 @@ func init() {
 &ContainerManifest{},
 &ContainerManifestList{},
 &BoundPod{},
-&BoundPods{},
 &List{},
 &LimitRange{},
 &LimitRangeList{},
@@ -79,7 +78,6 @@ func (*EventList) IsAnAPIObject() {}
 func (*ContainerManifest) IsAnAPIObject() {}
 func (*ContainerManifestList) IsAnAPIObject() {}
 func (*BoundPod) IsAnAPIObject() {}
-func (*BoundPods) IsAnAPIObject() {}
 func (*List) IsAnAPIObject() {}
 func (*LimitRange) IsAnAPIObject() {}
 func (*LimitRangeList) IsAnAPIObject() {}
@@ -1249,7 +1249,7 @@ type EventList struct {
 // ContainerManifest corresponds to the Container Manifest format, documented at:
 // https://developers.google.com/compute/docs/containers/container_vms#container_manifest
 // This is used as the representation of Kubernetes workloads.
-// DEPRECATED: Replaced with BoundPod
+// DEPRECATED: Replaced with Pod
 type ContainerManifest struct {
 // Required: This must be a supported version string, such as "v1beta1".
 Version string `json:"version"`
@@ -1268,7 +1268,7 @@ type ContainerManifest struct {
 }

 // ContainerManifestList is used to communicate container manifests to kubelet.
-// DEPRECATED: Replaced with BoundPods
+// DEPRECATED: Replaced with Pods
 type ContainerManifestList struct {
 TypeMeta `json:",inline"`
 ListMeta `json:"metadata,omitempty"`
@@ -1279,6 +1279,8 @@ type ContainerManifestList struct {
 // BoundPod is a collection of containers that should be run on a host. A BoundPod
 // defines how a Pod may change after a Binding is created. A Pod is a request to
 // execute a pod, whereas a BoundPod is the specification that would be run on a server.
+//
+// TODO(wojtek-t): Get rid of this type.
 type BoundPod struct {
 TypeMeta `json:",inline"`
 ObjectMeta `json:"metadata,omitempty"`
@@ -1287,19 +1289,6 @@ type BoundPod struct {
 Spec PodSpec `json:"spec,omitempty"`
 }

-// BoundPods is a list of Pods bound to a common server. The resource version of
-// the pod list is guaranteed to only change when the list of bound pods changes.
-type BoundPods struct {
-TypeMeta `json:",inline"`
-ObjectMeta `json:"metadata,omitempty"`
-
-// Host is the name of a node that these pods were bound to.
-Host string `json:"host"`
-
-// Items is the list of all pods bound to a given host.
-Items []BoundPod `json:"items"`
-}
-
 // List holds a list of objects, which may not be known by the server.
 type List struct {
 TypeMeta `json:",inline"`
@@ -1079,7 +1079,7 @@ type BoundPods struct {
 Host string `json:"host" description:"name of a node that these pods were bound to"`

 // Items is the list of all pods bound to a given host.
-Items []BoundPod `json:"items" description:"list of all pods bound to a given host"`
+Items []Pod `json:"items" description:"list of all pods bound to a given host"`
 }

 // List holds a list of objects, which may not be known by the server.
@@ -1141,7 +1141,7 @@ type BoundPods struct {
 Host string `json:"host" description:"name of a node that these pods were bound to"`

 // Items is the list of all pods bound to a given host.
-Items []BoundPod `json:"items" description:"list of all pods bound to a given host"`
+Items []Pod `json:"items" description:"list of all pods bound to a given host"`
 }

 // List holds a list of objects, which may not be known by the server.
@@ -671,7 +671,7 @@ type BoundPods struct {
 Host string `json:"host" description:"name of a node that these pods were bound to"`

 // Items is the list of all pods bound to a given host.
-Items []BoundPod `json:"items" description:"list of all pods bound to a given host"`
+Items []Pod `json:"items" description:"list of all pods bound to a given host"`
 }

 // ReplicationControllerSpec is the specification of a replication controller.
@@ -23,7 +23,6 @@ import (
 "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
 "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
 "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-"github.com/golang/glog"
 )

 // NewSourceApiserver creates a config source that watches and pulls from the apiserver.
@@ -35,19 +34,14 @@ func NewSourceApiserver(client *client.Client, hostname string, updates chan<- i
 // newSourceApiserverFromLW holds creates a config source that watches an pulls from the apiserver.
 func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
 send := func(objs []interface{}) {
-var bpods []api.BoundPod
+var pods []api.Pod
 for _, o := range objs {
 pod := o.(*api.Pod)
-bpod := api.BoundPod{}
-if err := api.Scheme.Convert(pod, &bpod); err != nil {
-glog.Errorf("Unable to interpret Pod from apiserver as a BoundPod: %v: %+v", err, pod)
-continue
-}
-// Make a dummy self link so that references to this bound pod will work.
-bpod.SelfLink = "/api/v1beta1/boundPods/" + bpod.Name
-bpods = append(bpods, bpod)
+// Make a dummy self link so that references to this pod will work.
+pod.SelfLink = "/api/v1beta1/pods/" + pod.Name
+pods = append(pods, *pod)
 }
-updates <- kubelet.PodUpdate{bpods, kubelet.SET, kubelet.ApiserverSource}
+updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.ApiserverSource}
 }
 cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
 }
@@ -54,14 +54,14 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 ObjectMeta: api.ObjectMeta{Name: "q"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}

-expectedBoundPod1v1 := api.BoundPod{
-ObjectMeta: api.ObjectMeta{Name: "p", SelfLink: "/api/v1beta1/boundPods/p"},
+expectedPod1v1 := api.Pod{
+ObjectMeta: api.ObjectMeta{Name: "p", SelfLink: "/api/v1beta1/pods/p"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}}
-expectedBoundPod1v2 := api.BoundPod{
-ObjectMeta: api.ObjectMeta{Name: "p", SelfLink: "/api/v1beta1/boundPods/p"},
+expectedPod1v2 := api.Pod{
+ObjectMeta: api.ObjectMeta{Name: "p", SelfLink: "/api/v1beta1/pods/p"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}}
-expectedBoundPod2 := api.BoundPod{
-ObjectMeta: api.ObjectMeta{Name: "q", SelfLink: "/api/v1beta1/boundPods/q"},
+expectedPod2 := api.Pod{
+ObjectMeta: api.ObjectMeta{Name: "q", SelfLink: "/api/v1beta1/pods/q"},
 Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}

 // Setup fake api client.
@@ -80,7 +80,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 t.Errorf("Unable to read from channel when expected")
 }
 update := got.(kubelet.PodUpdate)
-expected := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod1v1)
+expected := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedPod1v1)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v; Got %#v", expected, update)
 }
@@ -93,8 +93,8 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 }
 update = got.(kubelet.PodUpdate)
 // Could be sorted either of these two ways:
-expectedA := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod1v1, expectedBoundPod2)
-expectedB := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod2, expectedBoundPod1v1)
+expectedA := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedPod1v1, expectedPod2)
+expectedB := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedPod2, expectedPod1v1)

 if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
 t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
@@ -107,8 +107,8 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 t.Errorf("Unable to read from channel when expected")
 }
 update = got.(kubelet.PodUpdate)
-expectedA = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod1v2, expectedBoundPod2)
-expectedB = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod2, expectedBoundPod1v2)
+expectedA = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedPod1v2, expectedPod2)
+expectedB = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedPod2, expectedPod1v2)

 if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
 t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
@@ -121,7 +121,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
 t.Errorf("Unable to read from channel when expected")
 }
 update = got.(kubelet.PodUpdate)
-expected = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod2)
+expected = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedPod2)
 if !api.Semantic.DeepEqual(expected, update) {
 t.Errorf("Expected %#v, Got %#v", expected, update)
 }
@@ -113,7 +113,7 @@ func (c *PodConfig) Sync() {
 type podStorage struct {
 podLock sync.RWMutex
 // map of source name to pod name to pod reference
-pods map[string]map[string]*api.BoundPod
+pods map[string]map[string]*api.Pod
 mode PodConfigNotificationMode

 // ensures that updates are delivered in strict order
@@ -134,7 +134,7 @@ type podStorage struct {
 // TODO: allow initialization of the current state of the store with snapshotted version.
 func newPodStorage(updates chan<- kubelet.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage {
 return &podStorage{
-pods: make(map[string]map[string]*api.BoundPod),
+pods: make(map[string]map[string]*api.Pod),
 mode: mode,
 updates: updates,
 sourcesSeen: util.StringSet{},
@@ -169,12 +169,12 @@ func (s *podStorage) Merge(source string, change interface{}) error {
 s.updates <- *updates
 }
 if len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubelet.PodUpdate{s.MergedState().([]api.BoundPod), kubelet.SET, source}
+s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source}
 }

 case PodConfigNotificationSnapshot:
 if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
-s.updates <- kubelet.PodUpdate{s.MergedState().([]api.BoundPod), kubelet.SET, source}
+s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source}
 }

 default:
@@ -194,7 +194,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de

 pods := s.pods[source]
 if pods == nil {
-pods = make(map[string]*api.BoundPod)
+pods = make(map[string]*api.Pod)
 }

 update := change.(kubelet.PodUpdate)
@@ -246,7 +246,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 s.markSourceSet(source)
 // Clear the old map entries by just creating a new map
 oldPods := pods
-pods = make(map[string]*api.BoundPod)
+pods = make(map[string]*api.Pod)

 filtered := filterInvalidPods(update.Pods, source, s.recorder)
 for _, ref := range filtered {
@@ -298,12 +298,12 @@ func (s *podStorage) seenSources(sources ...string) bool {
 return s.sourcesSeen.HasAll(sources...)
 }

-func filterInvalidPods(pods []api.BoundPod, source string, recorder record.EventRecorder) (filtered []*api.BoundPod) {
+func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
 names := util.StringSet{}
 for i := range pods {
 pod := &pods[i]
 var errlist []error
-if errs := validation.ValidateBoundPod(pod); len(errs) != 0 {
+if errs := validation.ValidatePod(pod); len(errs) != 0 {
 errlist = append(errlist, errs...)
 // If validation fails, don't trust it any further -
 // even Name could be bad.
@@ -331,27 +331,27 @@ func filterInvalidPods(pods []api.BoundPod, source string, recorder record.Event
 func (s *podStorage) Sync() {
 s.updateLock.Lock()
 defer s.updateLock.Unlock()
-s.updates <- kubelet.PodUpdate{s.MergedState().([]api.BoundPod), kubelet.SET, kubelet.AllSource}
+s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, kubelet.AllSource}
 }

 // Object implements config.Accessor
 func (s *podStorage) MergedState() interface{} {
 s.podLock.RLock()
 defer s.podLock.RUnlock()
-pods := make([]api.BoundPod, 0)
+pods := make([]api.Pod, 0)
 for _, sourcePods := range s.pods {
 for _, podRef := range sourcePods {
 pod, err := api.Scheme.Copy(podRef)
 if err != nil {
 glog.Errorf("unable to copy pod: %v", err)
 }
-pods = append(pods, *pod.(*api.BoundPod))
+pods = append(pods, *pod.(*api.Pod))
 }
 }
 return pods
 }

-func bestPodIdentString(pod *api.BoundPod) string {
+func bestPodIdentString(pod *api.Pod) string {
 namespace := pod.Namespace
 if namespace == "" {
 namespace = "<empty-namespace>"
@@ -39,7 +39,7 @@ func expectEmptyChannel(t *testing.T, ch <-chan interface{}) {
 }
 }

-type sortedPods []api.BoundPod
+type sortedPods []api.Pod

 func (s sortedPods) Len() int {
 return len(s)
@@ -51,8 +51,8 @@ func (s sortedPods) Less(i, j int) bool {
 return s[i].Namespace < s[j].Namespace
 }

-func CreateValidPod(name, namespace, source string) api.BoundPod {
-return api.BoundPod{
+func CreateValidPod(name, namespace, source string) api.Pod {
+return api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: types.UID(name), // for the purpose of testing, this is unique enough
 Name: name,
@@ -66,8 +66,8 @@ func CreateValidPod(name, namespace, source string) api.BoundPod {
 }
 }

-func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.BoundPod) kubelet.PodUpdate {
-newPods := make([]api.BoundPod, len(pods))
+func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.Pod) kubelet.PodUpdate {
+newPods := make([]api.Pod, len(pods))
 for i := range pods {
 newPods[i] = pods[i]
 }
@@ -160,7 +160,7 @@ func TestInvalidPodFiltered(t *testing.T) {
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test")))

 // add an invalid update
-podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, api.BoundPod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
+podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
 channel <- podUpdate
 expectNoPodUpdate(t, ch)
 }
@@ -219,7 +219,7 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) {
 channel <- podUpdate
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod))

-podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, api.BoundPod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
+podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
 channel <- podUpdate
 expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, pod))
 }
@@ -66,7 +66,7 @@ func (s *sourceFile) extractFromPath() error {
 return err
 }
 // Emit an update with an empty PodList to allow FileSource to be marked as seen
-s.updates <- kubelet.PodUpdate{[]api.BoundPod{}, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{[]api.Pod{}, kubelet.SET, kubelet.FileSource}
 return fmt.Errorf("path does not exist, ignoring")
 }

@@ -83,7 +83,7 @@ func (s *sourceFile) extractFromPath() error {
 if err != nil {
 return err
 }
-s.updates <- kubelet.PodUpdate{[]api.BoundPod{pod}, kubelet.SET, kubelet.FileSource}
+s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.FileSource}

 default:
 return fmt.Errorf("path is not a directory or file")
@@ -95,13 +95,13 @@ func (s *sourceFile) extractFromPath() error {
 // Get as many pod configs as we can from a directory. Return an error iff something
 // prevented us from reading anything at all. Do not return an error if only some files
 // were problematic.
-func extractFromDir(name string) ([]api.BoundPod, error) {
+func extractFromDir(name string) ([]api.Pod, error) {
 dirents, err := filepath.Glob(filepath.Join(name, "[^.]*"))
 if err != nil {
 return nil, fmt.Errorf("glob failed: %v", err)
 }

-pods := make([]api.BoundPod, 0)
+pods := make([]api.Pod, 0)
 if len(dirents) == 0 {
 return pods, nil
 }
@@ -131,8 +131,8 @@ func extractFromDir(name string) ([]api.BoundPod, error) {
 return pods, nil
 }

-func extractFromFile(filename string) (api.BoundPod, error) {
-var pod api.BoundPod
+func extractFromFile(filename string) (api.Pod, error) {
+var pod api.Pod

 glog.V(3).Infof("Reading config file %q", filename)
 file, err := os.Open(filename)
@@ -153,10 +153,10 @@ func extractFromFile(filename string) (api.BoundPod, error) {
 // becomes nicer). Until then, we assert that the ContainerManifest
 // structure on disk is always v1beta1. Read that, convert it to a
 // "current" ContainerManifest (should be ~identical), then convert
-// that to a BoundPod (which is a well-understood conversion). This
-// avoids writing a v1beta1.ContainerManifest -> api.BoundPod
+// that to a Pod (which is a well-understood conversion). This
+// avoids writing a v1beta1.ContainerManifest -> api.Pod
 // conversion which would be identical to the api.ContainerManifest ->
-// api.BoundPod conversion.
+// api.Pod conversion.
 oldManifest := &v1beta1.ContainerManifest{}
 if err := yaml.Unmarshal(data, oldManifest); err != nil {
 return pod, fmt.Errorf("can't unmarshal file %q: %v", filename, err)
@@ -32,7 +32,7 @@ import (
 "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 )

-func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.BoundPod) {
+func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.Pod) {
 manifest := v1beta1.ContainerManifest{
 ID: id,
 UUID: types.UID(id),
@@ -52,7 +52,7 @@ func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.BoundPod)
 },
 },
 }
-expectedPod := api.BoundPod{
+expectedPod := api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: id,
 UID: types.UID(id),
@@ -130,7 +130,7 @@ func TestReadFromFile(t *testing.T) {
 select {
 case got := <-ch:
 update := got.(kubelet.PodUpdate)
-expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
+expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "",
 UID: "12345",
@@ -179,7 +179,7 @@ func TestReadFromFileWithoutID(t *testing.T) {
 select {
 case got := <-ch:
 update := got.(kubelet.PodUpdate)
-expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
+expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "",
 UID: "12345",
@@ -219,7 +219,7 @@ func TestReadV1Beta2FromFile(t *testing.T) {
 select {
 case got := <-ch:
 update := got.(kubelet.PodUpdate)
-expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
+expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{
 ObjectMeta: api.ObjectMeta{
 Name: "",
 UID: "12345",
@@ -303,7 +303,7 @@ func TestExtractFromDir(t *testing.T) {
 manifest2, expectedPod2 := ExampleManifestAndPod("2")

 manifests := []v1beta1.ContainerManifest{manifest, manifest2}
-pods := []api.BoundPod{expectedPod, expectedPod2}
+pods := []api.Pod{expectedPod, expectedPod2}
 files := make([]*os.File, len(manifests))

 dirName, err := ioutil.TempDir("", "foo")
@@ -357,7 +357,7 @@ func TestExtractFromDir(t *testing.T) {
 t.Fatalf("Expected %#v, Got %#v", expected, update)
 }
 for i := range update.Pods {
-if errs := validation.ValidateBoundPod(&update.Pods[i]); len(errs) != 0 {
+if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 {
 t.Errorf("Expected no validation errors on %#v, Got %q", update.Pods[i], errs)
 }
 }
@@ -74,7 +74,7 @@ func (s *sourceURL) extractFromURL() error {
 }
 if len(data) == 0 {
 // Emit an update with an empty PodList to allow HTTPSource to be marked as seen
-s.updates <- kubelet.PodUpdate{[]api.BoundPod{}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{[]api.Pod{}, kubelet.SET, kubelet.HTTPSource}
 return fmt.Errorf("zero-length data received from %v", s.url)
 }
 // Short circuit if the manifest has not changed since the last time it was read.
@@ -94,7 +94,7 @@ func (s *sourceURL) extractFromURL() error {
 if err = applyDefaults(&pod, s.url); err != nil {
 return err
 }
-s.updates <- kubelet.PodUpdate{[]api.BoundPod{pod}, kubelet.SET, kubelet.HTTPSource}
+s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.HTTPSource}
 return nil
 }

@@ -128,7 +128,7 @@ func (s *sourceURL) extractFromURL() error {
 s.url, string(data), singleErr, manifest, multiErr, manifests)
 }

-func tryDecodeSingle(data []byte) (parsed bool, manifest v1beta1.ContainerManifest, pod api.BoundPod, err error) {
+func tryDecodeSingle(data []byte) (parsed bool, manifest v1beta1.ContainerManifest, pod api.Pod, err error) {
 // TODO: should be api.Scheme.Decode
 // This is awful. DecodeInto() expects to find an APIObject, which
 // Manifest is not. We keep reading manifest for now for compat, but
@@ -136,10 +136,10 @@ func tryDecodeSingle(data []byte) (parsed bool, manifest v1beta1.ContainerManife
 // becomes nicer). Until then, we assert that the ContainerManifest
 // structure on disk is always v1beta1. Read that, convert it to a
 // "current" ContainerManifest (should be ~identical), then convert
-// that to a BoundPod (which is a well-understood conversion). This
-// avoids writing a v1beta1.ContainerManifest -> api.BoundPod
+// that to a Pod (which is a well-understood conversion). This
+// avoids writing a v1beta1.ContainerManifest -> api.Pod
 // conversion which would be identical to the api.ContainerManifest ->
-// api.BoundPod conversion.
+// api.Pod conversion.
 if err = yaml.Unmarshal(data, &manifest); err != nil {
 return false, manifest, pod, err
 }
@@ -158,7 +158,7 @@ func tryDecodeSingle(data []byte) (parsed bool, manifest v1beta1.ContainerManife
 return true, manifest, pod, nil
 }

-func tryDecodeList(data []byte) (parsed bool, manifests []v1beta1.ContainerManifest, pods api.BoundPods, err error) {
+func tryDecodeList(data []byte) (parsed bool, manifests []v1beta1.ContainerManifest, pods api.PodList, err error) {
 // TODO: should be api.Scheme.Decode
 // See the comment in tryDecodeSingle().
 if err = yaml.Unmarshal(data, &manifests); err != nil {
@@ -183,7 +183,7 @@ func tryDecodeList(data []byte) (parsed bool, manifests []v1beta1.ContainerManif
 return true, manifests, pods, nil
 }

-func applyDefaults(pod *api.BoundPod, url string) error {
+func applyDefaults(pod *api.Pod, url string) error {
 if len(pod.UID) == 0 {
 hasher := md5.New()
 fmt.Fprintf(hasher, "url:%s", url)
@@ -132,12 +132,12 @@ func TestExtractFromHTTP(t *testing.T) {
 Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.BoundPod{
+api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
 Namespace: "foobar",
-SelfLink: "/api/v1beta1/boundPods/foo",
+SelfLink: "/api/v1beta1/pods/foo",
 },
 Spec: api.PodSpec{
 RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
@@ -155,7 +155,7 @@ func TestExtractFromHTTP(t *testing.T) {
 manifests: api.ContainerManifest{Version: "v1beta1", UUID: "111"},
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.BoundPod{
+api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "111" + "-" + hostname,
@@ -173,12 +173,12 @@ func TestExtractFromHTTP(t *testing.T) {
 Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.BoundPod{
+api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
 Namespace: "foobar",
-SelfLink: "/api/v1beta1/boundPods/foo",
+SelfLink: "/api/v1beta1/pods/foo",
 },
 Spec: api.PodSpec{
 RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
@@ -201,12 +201,12 @@ func TestExtractFromHTTP(t *testing.T) {
 },
 expected: CreatePodUpdate(kubelet.SET,
 kubelet.HTTPSource,
-api.BoundPod{
+api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "111",
 Name: "foo" + "-" + hostname,
 Namespace: "foobar",
-SelfLink: "/api/v1beta1/boundPods/foo",
+SelfLink: "/api/v1beta1/pods/foo",
 },
 Spec: api.PodSpec{
 RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
@@ -218,12 +218,12 @@ func TestExtractFromHTTP(t *testing.T) {
 ImagePullPolicy: "Always"}},
 },
 },
-api.BoundPod{
+api.Pod{
 ObjectMeta: api.ObjectMeta{
 UID: "222",
 Name: "bar" + "-" + hostname,
 Namespace: "foobar",
-SelfLink: "/api/v1beta1/boundPods/bar",
+SelfLink: "/api/v1beta1/pods/bar",
 },
 Spec: api.PodSpec{
 RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
@@ -274,7 +274,7 @@ func TestExtractFromHTTP(t *testing.T) {
 t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
 }
 for i := range update.Pods {
-if errs := validation.ValidateBoundPod(&update.Pods[i]); len(errs) != 0 {
+if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 {
 t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, update.Pods[i], errors.NewAggregate(errs))
 }
 }
@@ -90,7 +90,7 @@ type SyncHandler interface {
 // Syncs current state to match the specified pods. SyncPodType specified what
 // type of sync is occuring per pod. StartTime specifies the time at which
 // syncing began (for use in monitoring).
-SyncPods(pods []api.BoundPod, podSyncTypes map[types.UID]metrics.SyncPodType, startTime time.Time) error
+SyncPods(pods []api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, startTime time.Time) error
 }

 type SourcesReadyFn func() bool
@@ -234,7 +234,7 @@ type Kubelet struct {
 // We make complete array copies out of this while locked, which is OK because once added to this array,
 // pods are immutable
 podLock sync.RWMutex
-pods []api.BoundPod
+pods []api.Pod

 // Needed to report events for containers belonging to deleted/modified pods.
 // Tracks references for reporting events
@@ -594,7 +594,7 @@ func (kl *Kubelet) syncNodeStatus() {
 }
 }

-func makeBinds(pod *api.BoundPod, container *api.Container, podVolumes volumeMap) []string {
+func makeBinds(container *api.Container, podVolumes volumeMap) []string {
 binds := []string{}
 for _, mount := range container.VolumeMounts {
 vol, ok := podVolumes[mount.Name]
@@ -698,7 +698,7 @@ func (kl *Kubelet) runHandler(podFullName string, uid types.UID, container *api.

 // fieldPath returns a fieldPath locating container within pod.
 // Returns an error if the container isn't part of the pod.
-func fieldPath(pod *api.BoundPod, container *api.Container) (string, error) {
+func fieldPath(pod *api.Pod, container *api.Container) (string, error) {
 for i := range pod.Spec.Containers {
 here := &pod.Spec.Containers[i]
 if here.Name == container.Name {
@@ -718,7 +718,7 @@ func fieldPath(pod *api.BoundPod, container *api.Container) (string, error) {
 // TODO: Pods that came to us by static config or over HTTP have no selfLink set, which makes
 // this fail and log an error. Figure out how we want to identify these pods to the rest of the
 // system.
-func containerRef(pod *api.BoundPod, container *api.Container) (*api.ObjectReference, error) {
+func containerRef(pod *api.Pod, container *api.Container) (*api.ObjectReference, error) {
 fieldPath, err := fieldPath(pod, container)
 if err != nil {
 // TODO: figure out intelligent way to refer to containers that we implicitly
@@ -758,7 +758,7 @@ func (kl *Kubelet) getRef(id dockertools.DockerID) (ref *api.ObjectReference, ok
 }

 // Run a single container from a pod. Returns the docker container ID
-func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, podVolumes volumeMap, netMode, ipcMode string) (id dockertools.DockerID, err error) {
+func (kl *Kubelet) runContainer(pod *api.Pod, container *api.Container, podVolumes volumeMap, netMode, ipcMode string) (id dockertools.DockerID, err error) {
 ref, err := containerRef(pod, container)
 if err != nil {
 glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
@@ -768,7 +768,7 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
 if err != nil {
 return "", err
 }
-binds := makeBinds(pod, container, podVolumes)
+binds := makeBinds(container, podVolumes)
 exposedPorts, portBindings := makePortsAndBindings(container)

 opts := docker.CreateContainerOptions{
@@ -943,7 +943,7 @@ func (kl *Kubelet) makeEnvironmentVariables(ns string, container *api.Container)
 return result, nil
 }

-func (kl *Kubelet) applyClusterDNS(hc *docker.HostConfig, pod *api.BoundPod) error {
+func (kl *Kubelet) applyClusterDNS(hc *docker.HostConfig, pod *api.Pod) error {
 // Get host DNS settings and append them to cluster DNS settings.
 f, err := os.Open("/etc/resolv.conf")
 if err != nil {
@@ -1024,7 +1024,7 @@ const (
 )

 // createPodInfraContainer starts the pod infra container for a pod. Returns the docker container ID of the newly created container.
-func (kl *Kubelet) createPodInfraContainer(pod *api.BoundPod) (dockertools.DockerID, error) {
+func (kl *Kubelet) createPodInfraContainer(pod *api.Pod) (dockertools.DockerID, error) {
 var ports []api.ContainerPort
 // Docker only exports ports from the pod infra container. Let's
 // collect all of the relevant ports and export them.
@@ -1095,7 +1095,7 @@ func (kl *Kubelet) pullImage(img string, ref *api.ObjectReference) error {
 }

 // Kill all containers in a pod. Returns the number of containers deleted and an error if one occurs.
-func (kl *Kubelet) killContainersInPod(pod *api.BoundPod, dockerContainers dockertools.DockerContainers) (int, error) {
+func (kl *Kubelet) killContainersInPod(pod *api.Pod, dockerContainers dockertools.DockerContainers) (int, error) {
 podFullName := GetPodFullName(pod)

 count := 0
@@ -1132,7 +1132,7 @@ func (kl *Kubelet) killContainersInPod(pod *api.BoundPod, dockerContainers docke
 type empty struct{}

 // makePodDataDirs creates the dirs for the pod datas.
-func (kl *Kubelet) makePodDataDirs(pod *api.BoundPod) error {
+func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
 uid := pod.UID
 if err := os.Mkdir(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
 return err
@@ -1146,7 +1146,7 @@ func (kl *Kubelet) makePodDataDirs(pod *api.BoundPod) error {
 return nil
 }

-func (kl *Kubelet) shouldContainerBeRestarted(container *api.Container, pod *api.BoundPod) bool {
+func (kl *Kubelet) shouldContainerBeRestarted(container *api.Container, pod *api.Pod) bool {
 podFullName := GetPodFullName(pod)
 // Check RestartPolicy for dead container
 recentContainers, err := dockertools.GetRecentDockerContainersWithNameAndUUID(kl.dockerClient, podFullName, pod.UID, container.Name)
@@ -1189,7 +1189,7 @@ func (kl *Kubelet) getPodInfraContainer(podFullName string, uid types.UID,

 // Attempts to start a container pulling the image before that if necessary. It returns DockerID of a started container
 // if it was successful, and a non-nil error otherwise.
-func (kl *Kubelet) pullImageAndRunContainer(pod *api.BoundPod, container *api.Container, podVolumes *volumeMap,
+func (kl *Kubelet) pullImageAndRunContainer(pod *api.Pod, container *api.Container, podVolumes *volumeMap,
 podInfraContainerID dockertools.DockerID) (dockertools.DockerID, error) {
 podFullName := GetPodFullName(pod)
 ref, err := containerRef(pod, container)
@@ -1240,7 +1240,7 @@ type podContainerChangesSpec struct {
 containersToKeep map[dockertools.DockerID]int
 }

-func (kl *Kubelet) computePodContainerChanges(pod *api.BoundPod, containersInPod dockertools.DockerContainers) (podContainerChangesSpec, error) {
+func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, containersInPod dockertools.DockerContainers) (podContainerChangesSpec, error) {
 podFullName := GetPodFullName(pod)
 uid := pod.UID
 glog.V(4).Infof("Syncing Pod %+v, podFullName: %q, uid: %q", pod, podFullName, uid)
@@ -1343,7 +1343,7 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.BoundPod, containersInPod
 }, nil
 }

-func (kl *Kubelet) syncPod(pod *api.BoundPod, containersInPod dockertools.DockerContainers) error {
+func (kl *Kubelet) syncPod(pod *api.Pod, containersInPod dockertools.DockerContainers) error {
 podFullName := GetPodFullName(pod)
 uid := pod.UID
 containerChanges, err := kl.computePodContainerChanges(pod, containersInPod)
@@ -1427,7 +1427,7 @@ type podContainer struct {

 // Stores all volumes defined by the set of pods into a map.
 // Keys for each entry are in the format (POD_ID)/(VOLUME_NAME)
-func getDesiredVolumes(pods []api.BoundPod) map[string]api.Volume {
+func getDesiredVolumes(pods []api.Pod) map[string]api.Volume {
 desiredVolumes := make(map[string]api.Volume)
 for _, pod := range pods {
 for _, volume := range pod.Spec.Volumes {
@@ -1438,7 +1438,7 @@ func getDesiredVolumes(pods []api.BoundPod) map[string]api.Volume {
 return desiredVolumes
 }

-func (kl *Kubelet) cleanupOrphanedPods(pods []api.BoundPod) error {
+func (kl *Kubelet) cleanupOrphanedPods(pods []api.Pod) error {
 desired := util.NewStringSet()
 for i := range pods {
 desired.Insert(string(pods[i].UID))
@@ -1461,7 +1461,7 @@ func (kl *Kubelet) cleanupOrphanedPods(pods []api.BoundPod) error {

 // Compares the map of current volumes to the map of desired volumes.
 // If an active volume does not have a respective desired volume, clean it up.
-func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.BoundPod, running []*docker.Container) error {
+func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.Pod, running []*docker.Container) error {
 desiredVolumes := getDesiredVolumes(pods)
 currentVolumes := kl.getPodVolumesFromDisk()
 runningSet := util.StringSet{}
@@ -1496,7 +1496,7 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.BoundPod, running []*docker
 }

 // SyncPods synchronizes the configured list of pods (desired state) with the host current state.
-func (kl *Kubelet) SyncPods(allPods []api.BoundPod, podSyncTypes map[types.UID]metrics.SyncPodType, start time.Time) error {
+func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, start time.Time) error {
 defer func() {
 metrics.SyncPodsLatency.Observe(metrics.SinceInMicroseconds(start))
 }()
@@ -1509,7 +1509,7 @@ func (kl *Kubelet) SyncPods(allPods []api.BoundPod, podSyncTypes map[types.UID]m
 kl.removeOrphanedStatuses(podFullNames)

 // Filtered out the rejected pod. They don't have running containers.
-var pods []api.BoundPod
+var pods []api.Pod
 for _, pod := range allPods {
 status, ok := kl.getPodStatusFromCache(GetPodFullName(&pod))
 if ok && status.Phase == api.PodFailed {
@@ -1607,9 +1607,9 @@ func (kl *Kubelet) SyncPods(allPods []api.BoundPod, podSyncTypes map[types.UID]m
 return err
 }

-func updateBoundPods(changed []api.BoundPod, current []api.BoundPod) []api.BoundPod {
-updated := []api.BoundPod{}
-m := map[types.UID]*api.BoundPod{}
+func updatePods(changed []api.Pod, current []api.Pod) []api.Pod {
+updated := []api.Pod{}
+m := map[types.UID]*api.Pod{}
 for i := range changed {
 pod := &changed[i]
 m[pod.UID] = pod
@@ -1629,7 +1629,7 @@ func updateBoundPods(changed []api.BoundPod, current []api.BoundPod) []api.Bound
 return updated
 }

-type podsByCreationTime []api.BoundPod
+type podsByCreationTime []api.Pod

 func (s podsByCreationTime) Len() int {
 return len(s)
@@ -1644,8 +1644,8 @@ func (s podsByCreationTime) Less(i, j int) bool {
 }

 // getHostPortConflicts detects pods with conflicted host ports and return them.
-func getHostPortConflicts(pods []api.BoundPod) []api.BoundPod {
-conflicts := []api.BoundPod{}
+func getHostPortConflicts(pods []api.Pod) []api.Pod {
+conflicts := []api.Pod{}
 ports := map[int]bool{}
 extract := func(p *api.ContainerPort) int { return p.HostPort }

@@ -1665,7 +1665,7 @@ func getHostPortConflicts(pods []api.BoundPod) []api.BoundPod {
 }

 // handleHostPortConflicts handles pods that conflict on Port.HostPort values.
-func (kl *Kubelet) handleHostPortConflicts(pods []api.BoundPod) {
+func (kl *Kubelet) handleHostPortConflicts(pods []api.Pod) {
 conflicts := getHostPortConflicts(pods)
 for _, pod := range conflicts {
 kl.recorder.Eventf(&pod, "hostPortConflict", "Cannot start the pod due to host port conflict.")
@@ -1704,7 +1704,7 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
 }
 }

-pods, err := kl.GetBoundPods()
+pods, err := kl.GetPods()
 if err != nil {
 glog.Errorf("Failed to get bound pods.")
 return
@@ -1746,7 +1746,7 @@ func (kl *Kubelet) updatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.Sy
 for i := range u.Pods {
 podSyncTypes[u.Pods[i].UID] = metrics.SyncPodUpdate
 }
-kl.pods = updateBoundPods(u.Pods, kl.pods)
+kl.pods = updatePods(u.Pods, kl.pods)
 kl.handleHostPortConflicts(kl.pods)
 default:
 panic("syncLoop does not support incremental changes")
@@ -1818,15 +1818,15 @@ func (kl *Kubelet) GetHostname() string {
 return kl.hostname
 }

-// GetBoundPods returns all pods bound to the kubelet and their spec.
-func (kl *Kubelet) GetBoundPods() ([]api.BoundPod, error) {
+// GetPods returns all pods bound to the kubelet and their spec.
+func (kl *Kubelet) GetPods() ([]api.Pod, error) {
 kl.podLock.RLock()
 defer kl.podLock.RUnlock()
-return append([]api.BoundPod{}, kl.pods...), nil
+return append([]api.Pod{}, kl.pods...), nil
 }

 // GetPodByName provides the first pod that matches namespace and name, as well as whether the node was found.
-func (kl *Kubelet) GetPodByName(namespace, name string) (*api.BoundPod, bool) {
+func (kl *Kubelet) GetPodByName(namespace, name string) (*api.Pod, bool) {
 kl.podLock.RLock()
 defer kl.podLock.RUnlock()
 for i := range kl.pods {
@ -83,7 +83,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
|
|||
waitGroup := new(sync.WaitGroup)
|
||||
kubelet.podWorkers = newPodWorkers(
|
||||
fakeDockerCache,
|
||||
func(pod *api.BoundPod, containers dockertools.DockerContainers) error {
|
||||
func(pod *api.Pod, containers dockertools.DockerContainers) error {
|
||||
err := kubelet.syncPod(pod, containers)
|
||||
waitGroup.Done()
|
||||
return err
|
||||
|
@ -381,7 +381,7 @@ func TestKillContainer(t *testing.T) {
|
|||
}
|
||||
|
||||
type channelReader struct {
|
||||
list [][]api.BoundPod
|
||||
list [][]api.Pod
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
|
@ -401,7 +401,7 @@ func startReading(channel <-chan interface{}) *channelReader {
|
|||
return cr
|
||||
}
|
||||
|
||||
func (cr *channelReader) GetList() [][]api.BoundPod {
|
||||
func (cr *channelReader) GetList() [][]api.Pod {
|
||||
cr.wg.Wait()
|
||||
return cr.list
|
||||
}
|
||||
|
@ -427,7 +427,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -460,7 +460,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) {
|
|||
TerminationMessagePath: "/dev/somepath",
|
||||
}
|
||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -509,7 +509,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
|
|||
waitGroup := testKubelet.waitGroup
|
||||
kubelet.podInfraContainerImage = "custom_image_name"
|
||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -562,7 +562,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
|
|||
puller.HasImages = []string{}
|
||||
kubelet.podInfraContainerImage = "custom_image_name"
|
||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -612,7 +612,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -658,7 +658,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -726,7 +726,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) {
|
|||
ID: "8765",
|
||||
},
|
||||
}
|
||||
kubelet.pods = []api.BoundPod{
|
||||
kubelet.pods = []api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -793,7 +793,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
if err := kubelet.SyncPods([]api.BoundPod{}, emptyPodUIDs, time.Now()); err != nil {
|
||||
if err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, time.Now()); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
// Validate nothing happened.
|
||||
|
@ -801,7 +801,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
|
|||
fakeDocker.ClearCalls()
|
||||
|
||||
ready = true
|
||||
if err := kubelet.SyncPods([]api.BoundPod{}, emptyPodUIDs, time.Now()); err != nil {
|
||||
if err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, time.Now()); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
|
||||
|
@ -839,7 +839,7 @@ func TestSyncPodsDeletes(t *testing.T) {
|
|||
ID: "4567",
|
||||
},
|
||||
}
|
||||
err := kubelet.SyncPods([]api.BoundPod{}, emptyPodUIDs, time.Now())
|
||||
err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, time.Now())
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
@ -880,7 +880,7 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
|
|||
ID: "4567",
|
||||
},
|
||||
}
|
||||
bound := api.BoundPod{
|
||||
bound := api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "bar",
|
||||
|
@ -921,7 +921,7 @@ func TestSyncPodBadHash(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
bound := api.BoundPod{
|
||||
bound := api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
|
@ -971,7 +971,7 @@ func TestSyncPodUnhealthy(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
bound := api.BoundPod{
|
||||
bound := api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
|
@ -1011,7 +1011,7 @@ func TestMountExternalVolumes(t *testing.T) {
|
|||
kubelet := testKubelet.kubelet
|
||||
kubelet.volumePluginMgr.InitPlugins([]volume.Plugin{&volume.FakePlugin{"fake", nil}}, &volumeHost{kubelet})
|
||||
|
||||
pod := api.BoundPod{
|
||||
pod := api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
|
@ -1036,7 +1036,7 @@ func TestMountExternalVolumes(t *testing.T) {
|
|||
}
|
||||
for _, name := range expectedPodVolumes {
|
||||
if _, ok := podVolumes[name]; !ok {
|
||||
t.Errorf("api.BoundPod volumes map is missing key: %s. %#v", name, podVolumes)
|
||||
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1115,21 +1115,13 @@ func TestMakeVolumesAndBinds(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
pod := api.BoundPod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "pod",
|
||||
Namespace: "test",
|
||||
},
|
||||
}
|
||||
|
||||
podVolumes := volumeMap{
|
||||
"disk": &stubVolume{"/mnt/disk"},
|
||||
"disk4": &stubVolume{"/mnt/host"},
|
||||
"disk5": &stubVolume{"/var/lib/kubelet/podID/volumes/empty/disk5"},
|
||||
}
|
||||
|
||||
binds := makeBinds(&pod, &container, podVolumes)
|
||||
binds := makeBinds(&container, podVolumes)
|
||||
|
||||
expectedBinds := []string{
|
||||
"/mnt/disk:/mnt/path",
|
||||
|
@ -1209,14 +1201,14 @@ func TestMakePortsAndBindings(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFieldPath(t *testing.T) {
|
||||
pod := &api.BoundPod{Spec: api.PodSpec{Containers: []api.Container{
|
||||
pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{
|
||||
{Name: "foo"},
|
||||
{Name: "bar"},
|
||||
{Name: ""},
|
||||
{Name: "baz"},
|
||||
}}}
|
||||
table := map[string]struct {
|
||||
pod *api.BoundPod
|
||||
pod *api.Pod
|
||||
container *api.Container
|
||||
path string
|
||||
success bool
|
||||
|
@ -1482,7 +1474,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
|
|||
podNamespace := "nsFoo"
|
||||
containerName := "containerFoo"
|
||||
output, err := kubelet.RunInContainer(
|
||||
GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
|
||||
GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
|
||||
"",
|
||||
containerName,
|
||||
[]string{"ls"})
|
||||
|
@ -1515,7 +1507,7 @@ func TestRunInContainer(t *testing.T) {
|
|||
|
||||
cmd := []string{"ls"}
|
||||
_, err := kubelet.RunInContainer(
|
||||
GetPodFullName(&api.BoundPod{
|
||||
GetPodFullName(&api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: podName,
|
||||
|
@ -1663,7 +1655,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
|
|||
ID: "9876",
|
||||
},
|
||||
}
|
||||
bound := api.BoundPod{
|
||||
bound := api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
|
@ -2066,7 +2058,7 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
|
|||
kubelet.podInfraContainerImage = "custom_image_name"
|
||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||
waitGroup.Add(1)
|
||||
err := kubelet.SyncPods([]api.BoundPod{
|
||||
err := kubelet.SyncPods([]api.Pod{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "12345678",
|
||||
|
@ -2850,7 +2842,7 @@ func TestExecInContainerNoSuchPod(t *testing.T) {
|
|||
podNamespace := "nsFoo"
|
||||
containerName := "containerFoo"
|
||||
err := kubelet.ExecInContainer(
|
||||
GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
|
||||
GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
|
||||
"",
|
||||
containerName,
|
||||
[]string{"ls"},
|
||||
|
@@ -2886,7 +2878,7 @@ func TestExecInContainerNoSuchContainer(t *testing.T) {
     }

     err := kubelet.ExecInContainer(
-        GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{
+        GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
             UID: "12345678",
             Name: podName,
             Namespace: podNamespace,
@@ -2945,7 +2937,7 @@ func TestExecInContainer(t *testing.T) {
     }

     err := kubelet.ExecInContainer(
-        GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{
+        GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
             UID: "12345678",
             Name: podName,
             Namespace: podNamespace,
@@ -2994,7 +2986,7 @@ func TestPortForwardNoSuchPod(t *testing.T) {
     var port uint16 = 5000

     err := kubelet.PortForward(
-        GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
+        GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
         "",
         port,
         nil,
@@ -3026,7 +3018,7 @@ func TestPortForwardNoSuchContainer(t *testing.T) {
     }

     err := kubelet.PortForward(
-        GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{
+        GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
             UID: "12345678",
             Name: podName,
             Namespace: podNamespace,
@@ -3071,7 +3063,7 @@ func TestPortForward(t *testing.T) {
     }

     err := kubelet.PortForward(
-        GetPodFullName(&api.BoundPod{ObjectMeta: api.ObjectMeta{
+        GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
             UID: "12345678",
             Name: podName,
             Namespace: podNamespace,
@@ -3096,7 +3088,7 @@ func TestPortForward(t *testing.T) {

 // Tests that identify the host port conflicts are detected correctly.
 func TestGetHostPortConflicts(t *testing.T) {
-    pods := []api.BoundPod{
+    pods := []api.Pod{
         {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
         {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
         {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
@@ -3109,11 +3101,11 @@ func TestGetHostPortConflicts(t *testing.T) {
     }

     // The new pod should cause conflict and be reported.
-    expected := api.BoundPod{
+    expected := api.Pod{
         Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
     }
     pods = append(pods, expected)
-    if actual := getHostPortConflicts(pods); !reflect.DeepEqual(actual, []api.BoundPod{expected}) {
+    if actual := getHostPortConflicts(pods); !reflect.DeepEqual(actual, []api.Pod{expected}) {
         t.Errorf("expected %#v, Got %#v", expected, actual)
     }
 }
@@ -3123,7 +3115,7 @@ func TestHandlePortConflicts(t *testing.T) {
     testKubelet := newTestKubelet(t)
     kl := testKubelet.kubelet
     spec := api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
-    pods := []api.BoundPod{
+    pods := []api.Pod{
         {
             ObjectMeta: api.ObjectMeta{
                 UID: "123456789",
@@ -3141,7 +3133,7 @@ func TestHandlePortConflicts(t *testing.T) {
             Spec: spec,
         },
     }
-    // Make sure the BoundPods are in the reverse order of creation time.
+    // Make sure the Pods are in the reverse order of creation time.
     pods[1].CreationTimestamp = util.NewTime(time.Now())
     pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
     // The newer pod should be rejected.
@@ -3174,7 +3166,7 @@ func TestHandlePortConflicts(t *testing.T) {
 func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
     testKubelet := newTestKubelet(t)
     kl := testKubelet.kubelet
-    pods := []api.BoundPod{
+    pods := []api.Pod{
         {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
         {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
     }
@@ -3184,7 +3176,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
         t.Fatalf("expected length of status map to be 1. Got map %#v.", kl.podStatuses)
     }
     // Sync with empty pods so that the entry in status map will be removed.
-    kl.SyncPods([]api.BoundPod{}, emptyPodUIDs, time.Now())
+    kl.SyncPods([]api.Pod{}, emptyPodUIDs, time.Now())
     if len(kl.podStatuses) != 0 {
         t.Fatalf("expected length of status map to be 0. Got map %#v.", kl.podStatuses)
     }
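The conflict detection exercised by TestGetHostPortConflicts and TestHandlePortConflicts above comes down to scanning pods in order and flagging any pod that requests a HostPort an earlier pod already claimed. A minimal, self-contained sketch of that idea, using simplified stand-in types rather than the real api package or the kubelet's actual getHostPortConflicts:

    // Minimal sketch of host-port conflict detection, assuming simplified
    // stand-in types. A pod is reported as a conflict if any of its containers
    // asks for a host port that an earlier pod in the slice already claimed.
    package main

    import "fmt"

    type ContainerPort struct{ HostPort int }
    type Container struct{ Ports []ContainerPort }
    type PodSpec struct{ Containers []Container }
    type Pod struct {
        Name string
        Spec PodSpec
    }

    func hostPortConflicts(pods []Pod) []Pod {
        conflicts := []Pod{}
        seen := map[int]bool{}
        for _, pod := range pods {
            clash := false
            for _, c := range pod.Spec.Containers {
                for _, p := range c.Ports {
                    if p.HostPort == 0 {
                        continue // 0 means no host port was requested
                    }
                    if seen[p.HostPort] {
                        clash = true
                    }
                    seen[p.HostPort] = true
                }
            }
            if clash {
                conflicts = append(conflicts, pod)
            }
        }
        return conflicts
    }

    func main() {
        pods := []Pod{
            {Name: "a", Spec: PodSpec{Containers: []Container{{Ports: []ContainerPort{{HostPort: 80}}}}}},
            {Name: "b", Spec: PodSpec{Containers: []Container{{Ports: []ContainerPort{{HostPort: 81}}}}}},
            {Name: "c", Spec: PodSpec{Containers: []Container{{Ports: []ContainerPort{{HostPort: 81}}}}}},
        }
        fmt.Println(hostPortConflicts(pods)) // reports pod "c", the later claimant of port 81
    }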
@@ -28,7 +28,7 @@ import (
     "github.com/golang/glog"
 )

-type syncPodFnType func(*api.BoundPod, dockertools.DockerContainers) error
+type syncPodFnType func(*api.Pod, dockertools.DockerContainers) error

 type podWorkers struct {
     // Protects podUpdates field.
@@ -58,7 +58,7 @@ type podWorkers struct {

 type workUpdate struct {
     // The pod state to reflect.
-    pod *api.BoundPod
+    pod *api.Pod

     // Function to call when the update is complete.
     updateCompleteFn func()
@@ -106,7 +106,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
 }

 // Apply the new setting to the specified pod. updateComplete is called when the update is completed.
-func (p *podWorkers) UpdatePod(pod *api.BoundPod, updateComplete func()) {
+func (p *podWorkers) UpdatePod(pod *api.Pod, updateComplete func()) {
     uid := pod.UID
     var podUpdates chan workUpdate
     var exists bool
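The podWorkers changes above keep the kubelet's per-pod worker pattern intact while switching the payload type: one goroutine per pod UID drains a channel of workUpdate values, so updates for different pods run concurrently while updates for the same pod stay ordered. A rough, self-contained sketch of that shape, with simplified stand-ins for api.Pod and the Docker cache wiring:

    // Sketch of a per-pod worker: UpdatePod lazily starts one goroutine per
    // pod UID and feeds it updates over a dedicated channel. Types are
    // simplified stand-ins, not the real kubelet wiring.
    package main

    import (
        "fmt"
        "sync"
    )

    type Pod struct {
        UID  string
        Name string
    }

    type workUpdate struct {
        pod              *Pod
        updateCompleteFn func()
    }

    type podWorkers struct {
        mu         sync.Mutex
        podUpdates map[string]chan workUpdate
        syncPodFn  func(*Pod) error
    }

    func newPodWorkers(syncPodFn func(*Pod) error) *podWorkers {
        return &podWorkers{
            podUpdates: map[string]chan workUpdate{},
            syncPodFn:  syncPodFn,
        }
    }

    func (p *podWorkers) managePodLoop(updates <-chan workUpdate) {
        for u := range updates {
            if err := p.syncPodFn(u.pod); err != nil {
                fmt.Println("sync failed:", err)
            }
            u.updateCompleteFn()
        }
    }

    // UpdatePod hands the latest pod state to that pod's worker, starting the
    // worker goroutine the first time the UID is seen.
    func (p *podWorkers) UpdatePod(pod *Pod, updateComplete func()) {
        p.mu.Lock()
        ch, ok := p.podUpdates[pod.UID]
        if !ok {
            ch = make(chan workUpdate, 1)
            p.podUpdates[pod.UID] = ch
            go p.managePodLoop(ch)
        }
        p.mu.Unlock()
        ch <- workUpdate{pod: pod, updateCompleteFn: updateComplete}
    }

    func main() {
        var wg sync.WaitGroup
        workers := newPodWorkers(func(pod *Pod) error {
            fmt.Println("syncing", pod.Name)
            return nil
        })
        wg.Add(1)
        workers.UpdatePod(&Pod{UID: "123", Name: "nginx"}, wg.Done)
        wg.Wait()
    }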
@@ -27,8 +27,8 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 )

-func newPod(uid, name string) *api.BoundPod {
-    return &api.BoundPod{
+func newPod(uid, name string) *api.Pod {
+    return &api.Pod{
         ObjectMeta: api.ObjectMeta{
             UID: types.UID(uid),
             Name: name,
@@ -46,7 +46,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]string) {

     podWorkers := newPodWorkers(
         fakeDockerCache,
-        func(pod *api.BoundPod, containers dockertools.DockerContainers) error {
+        func(pod *api.Pod, containers dockertools.DockerContainers) error {
             func() {
                 lock.Lock()
                 defer lock.Unlock()
@@ -41,7 +41,7 @@ const maxProbeRetries = 3
 // probeContainer probes the liveness/readiness of the given container.
 // If the container's liveness probe is unsuccessful, set readiness to false.
 // If liveness is successful, do a readiness check and set readiness accordingly.
-func (kl *Kubelet) probeContainer(pod *api.BoundPod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
     // Probe liveness.
     live, err := kl.probeContainerLiveness(pod, status, container, dockerContainer)
     if err != nil {
@@ -78,7 +78,7 @@ func (kl *Kubelet) probeContainer(pod *api.BoundPod, status api.PodStatus, conta

 // probeContainerLiveness probes the liveness of a container.
 // If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success.
-func (kl *Kubelet) probeContainerLiveness(pod *api.BoundPod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
     p := container.LivenessProbe
     if p == nil {
         return probe.Success, nil
@@ -91,7 +91,7 @@ func (kl *Kubelet) probeContainerLiveness(pod *api.BoundPod, status api.PodStatu

 // probeContainerLiveness probes the readiness of a container.
 // If the initial delay on the readiness probe has not passed the probe will return probe.Failure.
-func (kl *Kubelet) probeContainerReadiness(pod *api.BoundPod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
+func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (probe.Result, error) {
     p := container.ReadinessProbe
     if p == nil {
         return probe.Success, nil
@@ -104,7 +104,7 @@ func (kl *Kubelet) probeContainerReadiness(pod *api.BoundPod, status api.PodStat

 // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result
 // if it never succeeds.
-func (kl *Kubelet) runProbeWithRetries(p *api.Probe, pod *api.BoundPod, status api.PodStatus, container api.Container, retires int) (probe.Result, error) {
+func (kl *Kubelet) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, retires int) (probe.Result, error) {
     var err error
     var result probe.Result
     for i := 0; i < retires; i++ {
@@ -116,7 +116,7 @@ func (kl *Kubelet) runProbeWithRetries(p *api.Probe, pod *api.BoundPod, status a
     return result, err
 }

-func (kl *Kubelet) runProbe(p *api.Probe, pod *api.BoundPod, status api.PodStatus, container api.Container) (probe.Result, error) {
+func (kl *Kubelet) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container) (probe.Result, error) {
     timeout := time.Duration(p.TimeoutSeconds) * time.Second
     if p.Exec != nil {
         return kl.prober.exec.Probe(kl.newExecInContainer(pod, container))
@@ -190,7 +190,7 @@ type execInContainer struct {
     run func() ([]byte, error)
 }

-func (kl *Kubelet) newExecInContainer(pod *api.BoundPod, container api.Container) exec.Cmd {
+func (kl *Kubelet) newExecInContainer(pod *api.Pod, container api.Container) exec.Cmd {
     uid := pod.UID
     podFullName := GetPodFullName(pod)
     return execInContainer{func() ([]byte, error) {
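The retry wrapper in the hunks above (runProbeWithRetries) simply reruns a probe up to a fixed number of times and reports the last result if it never succeeds. A small standalone sketch of the same loop, with a simplified Result type standing in for the probe package:

    // Sketch of a bounded probe retry loop: return on the first success,
    // otherwise return the last result and error seen.
    package main

    import (
        "errors"
        "fmt"
    )

    type Result string

    const (
        Success Result = "success"
        Failure Result = "failure"
    )

    func runProbeWithRetries(probe func() (Result, error), maxRetries int) (Result, error) {
        var result Result
        var err error
        for i := 0; i < maxRetries; i++ {
            result, err = probe()
            if result == Success {
                return result, nil
            }
        }
        return result, err
    }

    func main() {
        attempts := 0
        flaky := func() (Result, error) {
            attempts++
            if attempts < 3 {
                return Failure, errors.New("not ready yet")
            }
            return Success, nil
        }
        result, err := runProbeWithRetries(flaky, 3) // mirrors maxProbeRetries = 3
        fmt.Println(result, err, "after", attempts, "attempts")
    }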
@@ -400,7 +400,7 @@ func TestProbeContainer(t *testing.T) {
         } else {
             kl = makeTestKubelet(test.expectedResult, nil)
         }
-        result, err := kl.probeContainer(&api.BoundPod{}, api.PodStatus{}, test.testContainer, dc)
+        result, err := kl.probeContainer(&api.Pod{}, api.PodStatus{}, test.testContainer, dc)
         if test.expectError && err == nil {
             t.Error("Expected error but did no error was returned.")
         }
@@ -33,7 +33,7 @@ const (
 )

 type RunPodResult struct {
-    Pod *api.BoundPod
+    Pod *api.Pod
     Err error
 }

@@ -51,7 +51,7 @@ func (kl *Kubelet) RunOnce(updates <-chan PodUpdate) ([]RunPodResult, error) {
 }

 // runOnce runs a given set of pods and returns their status.
-func (kl *Kubelet) runOnce(pods []api.BoundPod, retryDelay time.Duration) (results []RunPodResult, err error) {
+func (kl *Kubelet) runOnce(pods []api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
     if kl.dockerPuller == nil {
         kl.dockerPuller = dockertools.NewDockerPuller(kl.dockerClient, kl.pullQPS, kl.pullBurst)
     }
@@ -87,7 +87,7 @@ func (kl *Kubelet) runOnce(pods []api.BoundPod, retryDelay time.Duration) (resul
 }

 // runPod runs a single pod and wait until all containers are running.
-func (kl *Kubelet) runPod(pod api.BoundPod, retryDelay time.Duration) error {
+func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error {
     delay := retryDelay
     retry := 0
     for {
@@ -119,7 +119,7 @@ func (kl *Kubelet) runPod(pod api.BoundPod, retryDelay time.Duration) error {
 }

 // isPodRunning returns true if all containers of a manifest are running.
-func (kl *Kubelet) isPodRunning(pod api.BoundPod, dockerContainers dockertools.DockerContainers) (bool, error) {
+func (kl *Kubelet) isPodRunning(pod api.Pod, dockerContainers dockertools.DockerContainers) (bool, error) {
     for _, container := range pod.Spec.Containers {
         dockerContainer, found, _ := dockerContainers.FindPodContainer(GetPodFullName(&pod), pod.UID, container.Name)
         if !found {
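The isPodRunning check above treats a pod as running only when every container in its spec can be found among the currently running containers. A compact sketch of that rule, using a plain map as a stand-in for the dockertools container lookup:

    // Sketch of the "all containers found" rule, with stand-in types.
    package main

    import "fmt"

    type Container struct{ Name string }
    type Pod struct {
        Name       string
        Containers []Container
    }

    func isPodRunning(pod Pod, runningContainers map[string]bool) bool {
        for _, c := range pod.Containers {
            // Key the lookup by pod name + container name, roughly what
            // FindPodContainer does with the pod full name and UID.
            if !runningContainers[pod.Name+"/"+c.Name] {
                return false
            }
        }
        return true
    }

    func main() {
        running := map[string]bool{"web/nginx": true}
        pod := Pod{Name: "web", Containers: []Container{{Name: "nginx"}, {Name: "sidecar"}}}
        fmt.Println(isPodRunning(pod, running)) // false: "sidecar" is not running yet
    }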
@@ -128,7 +128,7 @@ func TestRunOnce(t *testing.T) {
         t: t,
     }
     kb.dockerPuller = &dockertools.FakeDockerPuller{}
-    results, err := kb.runOnce([]api.BoundPod{
+    results, err := kb.runOnce([]api.Pod{
         {
             ObjectMeta: api.ObjectMeta{
                 UID: "12345678",
@@ -84,8 +84,8 @@ type HostInterface interface {
     GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
     GetDockerVersion() ([]uint, error)
     GetMachineInfo() (*cadvisorApi.MachineInfo, error)
-    GetBoundPods() ([]api.BoundPod, error)
-    GetPodByName(namespace, name string) (*api.BoundPod, bool)
+    GetPods() ([]api.Pod, error)
+    GetPodByName(namespace, name string) (*api.Pod, bool)
     GetPodStatus(name string, uid types.UID) (api.PodStatus, error)
     RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error)
     ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error
@@ -117,7 +117,7 @@ func (s *Server) InstallDefaultHandlers() {
     s.mux.HandleFunc("/podInfo", s.handlePodInfoOld)
     s.mux.HandleFunc("/api/v1beta1/podInfo", s.handlePodInfoVersioned)
     s.mux.HandleFunc("/api/v1beta1/nodeInfo", s.handleNodeInfoVersioned)
-    s.mux.HandleFunc("/boundPods", s.handleBoundPods)
+    s.mux.HandleFunc("/pods", s.handlePods)
     s.mux.HandleFunc("/stats/", s.handleStats)
     s.mux.HandleFunc("/spec/", s.handleSpec)
 }
@@ -258,17 +258,17 @@ func (s *Server) handleContainerLogs(w http.ResponseWriter, req *http.Request) {
     }
 }

-// handleBoundPods returns a list of pod bound to the Kubelet and their spec
-func (s *Server) handleBoundPods(w http.ResponseWriter, req *http.Request) {
-    pods, err := s.host.GetBoundPods()
+// handlePods returns a list of pod bounds to the Kubelet and their spec
+func (s *Server) handlePods(w http.ResponseWriter, req *http.Request) {
+    pods, err := s.host.GetPods()
     if err != nil {
         s.error(w, err)
         return
     }
-    boundPods := &api.BoundPods{
+    podList := &api.PodList{
         Items: pods,
     }
-    data, err := latest.Codec.Encode(boundPods)
+    data, err := latest.Codec.Encode(podList)
     if err != nil {
         s.error(w, err)
         return
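The new /pods handler follows the usual shape of the kubelet's read-only endpoints: ask the host for its pod list, wrap it in a list object, encode it, and write it out. A hedged, self-contained sketch of that flow, using encoding/json and local types in place of latest.Codec and the real api package:

    // Sketch of a /pods-style read-only handler. The port and types here are
    // illustrative assumptions, not the real kubelet server.
    package main

    import (
        "encoding/json"
        "log"
        "net/http"
    )

    type Pod struct {
        Name      string `json:"name"`
        Namespace string `json:"namespace"`
    }

    type PodList struct {
        Items []Pod `json:"items"`
    }

    type host interface {
        GetPods() ([]Pod, error)
    }

    type fakeHost struct{}

    func (fakeHost) GetPods() ([]Pod, error) {
        return []Pod{{Name: "nginx", Namespace: "default"}}, nil
    }

    func handlePods(h host) http.HandlerFunc {
        return func(w http.ResponseWriter, req *http.Request) {
            pods, err := h.GetPods()
            if err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
            w.Header().Set("Content-Type", "application/json")
            json.NewEncoder(w).Encode(PodList{Items: pods})
        }
    }

    func main() {
        http.HandleFunc("/pods", handlePods(fakeHost{}))
        log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
    }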
@@ -39,12 +39,12 @@ import (
 )

 type fakeKubelet struct {
-    podByNameFunc func(namespace, name string) (*api.BoundPod, bool)
+    podByNameFunc func(namespace, name string) (*api.Pod, bool)
     statusFunc func(name string) (api.PodStatus, error)
     containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
     rootInfoFunc func(query *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
     machineInfoFunc func() (*cadvisorApi.MachineInfo, error)
-    boundPodsFunc func() ([]api.BoundPod, error)
+    podsFunc func() ([]api.Pod, error)
     logFunc func(w http.ResponseWriter, req *http.Request)
     runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
     dockerVersionFunc func() ([]uint, error)
@@ -55,7 +55,7 @@ type fakeKubelet struct {
     hostnameFunc func() string
 }

-func (fk *fakeKubelet) GetPodByName(namespace, name string) (*api.BoundPod, bool) {
+func (fk *fakeKubelet) GetPodByName(namespace, name string) (*api.Pod, bool) {
     return fk.podByNameFunc(namespace, name)
 }

@@ -79,8 +79,8 @@ func (fk *fakeKubelet) GetMachineInfo() (*cadvisorApi.MachineInfo, error) {
     return fk.machineInfoFunc()
 }

-func (fk *fakeKubelet) GetBoundPods() ([]api.BoundPod, error) {
-    return fk.boundPodsFunc()
+func (fk *fakeKubelet) GetPods() ([]api.Pod, error) {
+    return fk.podsFunc()
 }

 func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
@@ -125,8 +125,8 @@ func newServerTest() *serverTestFramework {
     }
     fw.updateReader = startReading(fw.updateChan)
     fw.fakeKubelet = &fakeKubelet{
-        podByNameFunc: func(namespace, name string) (*api.BoundPod, bool) {
-            return &api.BoundPod{
+        podByNameFunc: func(namespace, name string) (*api.Pod, bool) {
+            return &api.Pod{
                 ObjectMeta: api.ObjectMeta{
                     Namespace: namespace,
                     Name: name,
@@ -510,8 +510,8 @@ func TestHealthCheck(t *testing.T) {
 }

 func setPodByNameFunc(fw *serverTestFramework, namespace, pod, container string) {
-    fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*api.BoundPod, bool) {
-        return &api.BoundPod{
+    fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*api.Pod, bool) {
+        return &api.Pod{
             ObjectMeta: api.ObjectMeta{
                 Namespace: namespace,
                 Name: pod,
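fakeKubelet in the hunks above is a function-field test double: a struct whose methods just delegate to swappable closures, so each test overrides only the behaviour it needs. A minimal sketch of the same pattern with stand-in types:

    // Sketch of a function-field fake: the interface method forwards to a
    // closure that each test can replace.
    package main

    import "fmt"

    type Pod struct{ Namespace, Name string }

    type podHost interface {
        GetPodByName(namespace, name string) (*Pod, bool)
    }

    type fakeHost struct {
        podByNameFunc func(namespace, name string) (*Pod, bool)
    }

    func (f *fakeHost) GetPodByName(namespace, name string) (*Pod, bool) {
        return f.podByNameFunc(namespace, name)
    }

    func main() {
        var h podHost = &fakeHost{
            // Each test swaps this closure for whatever canned answer it needs.
            podByNameFunc: func(namespace, name string) (*Pod, bool) {
                return &Pod{Namespace: namespace, Name: name}, true
            },
        }
        pod, found := h.GetPodByName("default", "nginx")
        fmt.Println(pod.Namespace, pod.Name, found)
    }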
@@ -62,13 +62,13 @@ const (
 // functionally similar, this helps our unit tests properly check that the correct PodUpdates
 // are generated.
 type PodUpdate struct {
-    Pods []api.BoundPod
+    Pods []api.Pod
     Op PodOperation
     Source string
 }

 // GetPodFullName returns a name that uniquely identifies a pod across all config sources.
-func GetPodFullName(pod *api.BoundPod) string {
+func GetPodFullName(pod *api.Pod) string {
     // Use underscore as the delimiter because it is not allowed in pod name
     // (DNS subdomain format), while allowed in the container name format.
     return fmt.Sprintf("%s_%s", pod.Name, pod.Namespace)
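GetPodFullName, as shown above, simply joins the pod name and namespace with an underscore, which cannot appear in a pod name. A tiny standalone example (with a stand-in Pod type) of the resulting format:

    // Quick check of the name_namespace format used for pod full names.
    package main

    import "fmt"

    type Pod struct{ Name, Namespace string }

    func GetPodFullName(pod *Pod) string {
        // Underscore is safe as a delimiter: it cannot appear in a pod name
        // (DNS subdomain format), though it can appear in container names.
        return fmt.Sprintf("%s_%s", pod.Name, pod.Namespace)
    }

    func main() {
        fmt.Println(GetPodFullName(&Pod{Name: "nginx", Namespace: "default"})) // nginx_default
    }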
@@ -72,7 +72,7 @@ func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *api.Volume, podRef *api.Obj
     return builder
 }

-func (kl *Kubelet) mountExternalVolumes(pod *api.BoundPod) (volumeMap, error) {
+func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (volumeMap, error) {
     podVolumes := make(volumeMap)
     for i := range pod.Spec.Volumes {
         volSpec := &pod.Spec.Volumes[i]
@@ -115,7 +115,7 @@ var _ = Describe("Events", func() {
                 labels.Everything(),
                 labels.Set{
                     "involvedObject.uid": string(podWithUid.UID),
-                    "involvedObject.kind": "BoundPod",
+                    "involvedObject.kind": "Pod",
                     "involvedObject.namespace": api.NamespaceDefault,
                     "source": "kubelet",
                 }.AsSelector(),