Upgrade default k8s to v1.4.3

No manual changes needed
pull/730/head
Matt Rickard 2016-10-20 11:42:45 -07:00
parent 5fb2b35698
commit 2202d5ccfc
10 changed files with 1229 additions and 1134 deletions

Godeps/Godeps.json (generated): 2056 changed lines; file diff suppressed because it is too large.


@@ -26,7 +26,7 @@ minikube start
--insecure-registry value Insecure Docker registries to pass to the Docker daemon (default [])
--iso-url string Location of the minikube iso (default "https://storage.googleapis.com/minikube/minikube-0.7.iso")
--kubernetes-version string The kubernetes version that the minikube VM will use (ex: v1.2.3)
OR a URI which contains a localkube binary (ex: https://storage.googleapis.com/minikube/k8sReleases/v1.3.0/localkube-linux-amd64) (default "v1.4.1")
OR a URI which contains a localkube binary (ex: https://storage.googleapis.com/minikube/k8sReleases/v1.3.0/localkube-linux-amd64) (default "v1.4.3")
--memory int Amount of RAM allocated to the minikube VM (default 2048)
--network-plugin string The name of the network plugin
--registry-mirror value Registry mirrors to pass to the Docker daemon (default [])
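
As a quick usage sketch (versions shown are the ones this commit touches): with the new default, a plain "minikube start" provisions Kubernetes v1.4.3, while the flag above still pins an older release:

    minikube start --kubernetes-version=v1.4.1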


@@ -268,17 +268,37 @@ type GroupVersions []GroupVersion
// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
// if none of the options match the group.
func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) {
func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) {
var targets []GroupVersionKind
for _, gv := range gvs {
target, ok := gv.KindForGroupVersionKinds(kinds)
if !ok {
continue
}
return target, true
targets = append(targets, target)
}
if len(targets) == 1 {
return targets[0], true
}
if len(targets) > 1 {
return bestMatch(kinds, targets), true
}
return GroupVersionKind{}, false
}
// bestMatch tries to pick best matching GroupVersionKind and falls back to the first
// found if no exact match exists.
func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind {
for _, gvk := range targets {
for _, k := range kinds {
if k == gvk {
return k
}
}
}
return targets[0]
}
// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that
// do not use TypeMeta.
func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) {
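
To make the new resolution order concrete, here is a minimal sketch (not from the patch; the vendored import path, and the group-only fallback in the per-GroupVersion lookup that can synthesize a target absent from kinds, are assumptions based on this tree):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned" // import path assumed for this vendor tree
)

func main() {
	// Preferred group versions, highest priority first; v1alpha1 is
	// purely illustrative.
	gvs := unversioned.GroupVersions{
		{Group: "apps", Version: "v1alpha1"},
		{Group: "apps", Version: "v1beta1"},
	}
	// The object is only registered as apps/v1beta1.
	kinds := []unversioned.GroupVersionKind{
		{Group: "apps", Version: "v1beta1", Kind: "Deployment"},
	}
	// Previously the first group version with any match won, which could
	// return a synthesized apps/v1alpha1 target; bestMatch now prefers a
	// target that appears verbatim in kinds.
	if target, ok := gvs.KindForGroupVersionKinds(kinds); ok {
		fmt.Println("resolved to", target)
	}
}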


@@ -23,6 +23,9 @@ import (
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
)
// if you use this, there is one behavior change compared to a standard Informer.
@@ -74,6 +77,34 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod
return sharedIndexInformer
}
// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool
// syncedPollPeriod controls how often you look at the status of your sync funcs
const syncedPollPeriod = 100 * time.Millisecond
// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shut down
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
err := wait.PollUntil(syncedPollPeriod,
func() (bool, error) {
for _, syncFunc := range cacheSyncs {
if !syncFunc() {
return false, nil
}
}
return true, nil
},
stopCh)
if err != nil {
glog.V(2).Infof("stop requested")
return false
}
glog.V(4).Infof("caches populated")
return true
}
type sharedIndexInformer struct {
indexer Indexer
controller *Controller
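
A minimal sketch of the intended call pattern for the new helper, written as if it lived in this package (the informer names and runController are hypothetical; imports would be "errors" plus utilruntime "k8s.io/kubernetes/pkg/util/runtime"):

// runController starts its informers, then gates all work on an initial
// cache sync, bailing out cleanly if stopCh closes first.
func runController(podInformer, nodeInformer SharedIndexInformer, stopCh <-chan struct{}) {
	go podInformer.Run(stopCh)
	go nodeInformer.Run(stopCh)

	if !WaitForCacheSync(stopCh, podInformer.HasSynced, nodeInformer.HasSynced) {
		utilruntime.HandleError(errors.New("timed out waiting for caches to sync"))
		return
	}
	// Caches are warm; safe to start workers.
}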


@@ -134,7 +134,16 @@ func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string, force
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
pod, ok := obj.(*api.Pod)
if !ok {
return
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
return
}
}
// consider only terminating pods
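
The unwrapping above (and the analogous Node case in the next hunk) follows a general pattern worth isolating; a standalone sketch, with a hypothetical helper name:

// podFromDeleteEvent returns the deleted Pod, unwrapping the
// cache.DeletedFinalStateUnknown tombstone that a delete event may be
// wrapped in when the watch was interrupted and the state re-listed.
func podFromDeleteEvent(obj interface{}) (*api.Pod, bool) {
	if pod, ok := obj.(*api.Pod); ok {
		return pod, true
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false // neither a Pod nor a tombstone
	}
	pod, ok := tombstone.Obj.(*api.Pod)
	return pod, ok
}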


@@ -286,9 +286,21 @@ func NewNodeController(
}
},
DeleteFunc: func(obj interface{}) {
node := obj.(*api.Node)
err := nc.cidrAllocator.ReleaseCIDR(node)
if err != nil {
node, isNode := obj.(*api.Node)
// We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly. #34692
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*api.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
if err := nc.cidrAllocator.ReleaseCIDR(node); err != nil {
glog.Errorf("Error releasing CIDR: %v", err)
}
},
@@ -385,94 +397,103 @@ func (nc *NodeController) Run() {
go nc.internalPodInformer.Run(wait.NeverStop)
}
// Incorporate the results of node status pushed from kubelet to master.
go wait.Until(func() {
if err := nc.monitorNodeStatus(); err != nil {
glog.Errorf("Error monitoring node status: %v", err)
}
}, nc.nodeMonitorPeriod, wait.NeverStop)
go func() {
defer utilruntime.HandleCrash()
// Managing eviction of nodes:
// 1. when we delete pods off a node, if the node was not empty at the time we then
// queue a termination watcher
// a. If we hit an error, retry deletion
// 2. The terminator loop ensures that pods are eventually cleaned and we never
// terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt
// is the time from which we measure "has this pod been terminating too long",
// after which we will delete the pod with grace period 0 (force delete).
// a. If we hit errors, retry instantly
// b. If there are no pods left terminating, exit
// c. If there are pods still terminating, wait for their estimated completion
// before retrying
go wait.Until(func() {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zonePodEvictor {
nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
obj, exists, err := nc.nodeStore.GetByKey(value.Value)
if err != nil {
glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err)
} else if !exists {
glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
} else {
node, _ := obj.(*api.Node)
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
nodeUid, _ := value.UID.(string)
remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
return false, 0
}
if remaining {
nc.zoneTerminationEvictor[k].Add(value.Value, value.UID)
}
return true, 0
})
}
}, nodeEvictionPeriod, wait.NeverStop)
// TODO: replace with a controller that ensures pods that are terminating complete
// in a particular time period
go wait.Until(func() {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zoneTerminationEvictor {
nc.zoneTerminationEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
nodeUid, _ := value.UID.(string)
completed, remaining, err := terminatePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, value.AddedAt, nc.maximumGracePeriod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
return false, 0
}
if completed {
glog.V(2).Infof("All pods terminated on %s", value.Value)
recordNodeEvent(nc.recorder, value.Value, nodeUid, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
return true, 0
}
glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining)
// clamp very short intervals
if remaining < nodeEvictionPeriod {
remaining = nodeEvictionPeriod
}
return false, remaining
})
}
}, nodeEvictionPeriod, wait.NeverStop)
go wait.Until(func() {
pods, err := nc.podStore.List(labels.Everything())
if err != nil {
utilruntime.HandleError(err)
if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeController.HasSynced, nc.podController.HasSynced, nc.daemonSetController.HasSynced) {
utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync..."))
return
}
cleanupOrphanedPods(pods, nc.nodeStore.Store, nc.forcefullyDeletePod)
}, 30*time.Second, wait.NeverStop)
// Incorporate the results of node status pushed from kubelet to master.
go wait.Until(func() {
if err := nc.monitorNodeStatus(); err != nil {
glog.Errorf("Error monitoring node status: %v", err)
}
}, nc.nodeMonitorPeriod, wait.NeverStop)
// Managing eviction of nodes:
// 1. when we delete pods off a node, if the node was not empty at the time we then
// queue a termination watcher
// a. If we hit an error, retry deletion
// 2. The terminator loop ensures that pods are eventually cleaned and we never
// terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt
// is the time from which we measure "has this pod been terminating too long",
// after which we will delete the pod with grace period 0 (force delete).
// a. If we hit errors, retry instantly
// b. If there are no pods left terminating, exit
// c. If there are pods still terminating, wait for their estimated completion
// before retrying
go wait.Until(func() {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zonePodEvictor {
nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
obj, exists, err := nc.nodeStore.GetByKey(value.Value)
if err != nil {
glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err)
} else if !exists {
glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
} else {
node, _ := obj.(*api.Node)
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
nodeUid, _ := value.UID.(string)
remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
return false, 0
}
if remaining {
nc.zoneTerminationEvictor[k].Add(value.Value, value.UID)
}
return true, 0
})
}
}, nodeEvictionPeriod, wait.NeverStop)
// TODO: replace with a controller that ensures pods that are terminating complete
// in a particular time period
go wait.Until(func() {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
for k := range nc.zoneTerminationEvictor {
nc.zoneTerminationEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
nodeUid, _ := value.UID.(string)
completed, remaining, err := terminatePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, value.AddedAt, nc.maximumGracePeriod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
return false, 0
}
if completed {
glog.V(2).Infof("All pods terminated on %s", value.Value)
recordNodeEvent(nc.recorder, value.Value, nodeUid, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
return true, 0
}
glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining)
// clamp very short intervals
if remaining < nodeEvictionPeriod {
remaining = nodeEvictionPeriod
}
return false, remaining
})
}
}, nodeEvictionPeriod, wait.NeverStop)
go wait.Until(func() {
pods, err := nc.podStore.List(labels.Everything())
if err != nil {
utilruntime.HandleError(err)
return
}
cleanupOrphanedPods(pods, nc.nodeStore.Store, nc.forcefullyDeletePod)
}, 30*time.Second, wait.NeverStop)
}()
}
// monitorNodeStatus verifies node statuses are constantly updated by kubelet, and if not,
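
One detail that is easy to miss in the eviction loops above: the Try callback's return values form a retry contract of (done, retryAfter). A sketch (evictNode is a hypothetical work function; TimedValue is this package's queue item type):

nc.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) {
	if err := evictNode(value.Value); err != nil {
		return false, 0 // not done; a zero wait means retry immediately
	}
	return true, 0 // done; retire the item from the queue
})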


@@ -297,13 +297,16 @@ func (m *Master) InstallAPIs(c *Config) {
// TODO find a better way to configure priority of groups
for _, group := range sets.StringKeySet(c.RESTStorageProviders).List() {
if !c.APIResourceConfigSource.AnyResourcesForGroupEnabled(group) {
glog.V(1).Infof("Skipping disabled API group %q.", group)
continue
}
restStorageBuilder := c.RESTStorageProviders[group]
apiGroupInfo, enabled := restStorageBuilder.NewRESTStorage(c.APIResourceConfigSource, restOptionsGetter)
if !enabled {
glog.Warningf("Problem initializing API group %q, skipping.", group)
continue
}
glog.V(1).Infof("Enabling API group %q.", group)
// This is here so that, if the policy group is present, the eviction
subresource handler will be able to find poddisruptionbudgets


@@ -189,6 +189,11 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error {
return WaitFor(poller(interval, 0), condition, done)
}
// PollUntil is like Poll, but it takes a stop channel instead of a total duration
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return WaitFor(poller(interval, 0), condition, stopCh)
}
// WaitFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
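
A short sketch of calling the new helper from outside this package (names are hypothetical): poll a condition at a fixed interval until it holds, or give up with an error when the stop channel closes first.

// waitForReady polls isReady every 500ms until it returns true or stopCh
// closes.
func waitForReady(isReady func() bool, stopCh <-chan struct{}) error {
	return wait.PollUntil(500*time.Millisecond, func() (bool, error) {
		// (false, nil) keeps polling; (true, nil) ends the wait successfully.
		return isReady(), nil
	}, stopCh)
}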


@@ -51,7 +51,7 @@ var (
// semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight
// translation to be semver compliant.
gitVersion string = "v1.4.1+$Format:%h$"
gitVersion string = "v1.4.3+$Format:%h$"
gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"


@@ -52,28 +52,34 @@ func New(opts x509.VerifyOptions, user UserConversion) *Authenticator {
// AuthenticateRequest authenticates the request using presented client certificates
func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
if req.TLS == nil {
if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 {
return nil, false, nil
}
// Use intermediates, if provided
optsCopy := a.opts
if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 {
optsCopy.Intermediates = x509.NewCertPool()
for _, intermediate := range req.TLS.PeerCertificates[1:] {
optsCopy.Intermediates.AddCert(intermediate)
}
}
chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy)
if err != nil {
return nil, false, err
}
var errlist []error
for _, cert := range req.TLS.PeerCertificates {
chains, err := cert.Verify(a.opts)
for _, chain := range chains {
user, ok, err := a.user.User(chain)
if err != nil {
errlist = append(errlist, err)
continue
}
for _, chain := range chains {
user, ok, err := a.user.User(chain)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
return user, ok, err
}
if ok {
return user, ok, err
}
}
return nil, false, utilerrors.NewAggregate(errlist)
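
As a standalone sketch of the verification flow the rewritten method now follows (standard library crypto/tls and crypto/x509 only; the function and variable names are illustrative): verify just the presented leaf certificate, treating any additional presented certificates as untrusted intermediates rather than roots.

// verifyLeaf builds certificate chains for the leaf, letting extra
// presented certificates help form the path without being trusted as CAs.
func verifyLeaf(state *tls.ConnectionState, roots *x509.CertPool) ([][]*x509.Certificate, error) {
	opts := x509.VerifyOptions{Roots: roots}
	if len(state.PeerCertificates) > 1 {
		opts.Intermediates = x509.NewCertPool()
		for _, c := range state.PeerCertificates[1:] {
			opts.Intermediates.AddCert(c)
		}
	}
	return state.PeerCertificates[0].Verify(opts)
}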