Upgrade to Kubernetes 1.4.1
parent e3a05b0656
commit 2085b894be
File diff suppressed because it is too large
@@ -45,7 +45,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/restclient"
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
-	kframework "k8s.io/kubernetes/pkg/controller/framework"
 	kselector "k8s.io/kubernetes/pkg/fields"
 	etcdutil "k8s.io/kubernetes/pkg/storage/etcd/util"
 	"k8s.io/kubernetes/pkg/util/validation"
@@ -556,11 +555,11 @@ func newKubeClient() (*kclient.Client, error) {
 }
 
 func watchForServices(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
-	serviceStore, serviceController := kframework.NewInformer(
+	serviceStore, serviceController := kcache.NewInformer(
 		createServiceLW(kubeClient),
 		&kapi.Service{},
 		resyncPeriod,
-		kframework.ResourceEventHandlerFuncs{
+		kcache.ResourceEventHandlerFuncs{
 			AddFunc:    ks.newService,
 			DeleteFunc: ks.removeService,
 			UpdateFunc: ks.updateService,
@@ -571,11 +570,11 @@ func watchForServices(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
 }
 
 func watchEndpoints(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
-	eStore, eController := kframework.NewInformer(
+	eStore, eController := kcache.NewInformer(
 		createEndpointsLW(kubeClient),
 		&kapi.Endpoints{},
 		resyncPeriod,
-		kframework.ResourceEventHandlerFuncs{
+		kcache.ResourceEventHandlerFuncs{
 			AddFunc: ks.handleEndpointAdd,
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				// TODO: Avoid unwanted updates.
@@ -589,11 +588,11 @@ func watchEndpoints(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
 }
 
 func watchPods(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
-	eStore, eController := kframework.NewInformer(
+	eStore, eController := kcache.NewInformer(
 		createEndpointsPodLW(kubeClient),
 		&kapi.Pod{},
 		resyncPeriod,
-		kframework.ResourceEventHandlerFuncs{
+		kcache.ResourceEventHandlerFuncs{
 			AddFunc: ks.handlePodCreate,
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				ks.handlePodUpdate(oldObj, newObj)
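Kubernetes 1.4 moved the informer machinery from `pkg/controller/framework` into `pkg/client/cache`, which is all the three hunks above change: the `kframework` import goes away and `NewInformer`/`ResourceEventHandlerFuncs` are reached through the existing `kcache` alias. A minimal sketch of the renamed call, reusing the names from this file; the `wait.NeverStop` wiring is an assumption about the surrounding code, not shown in the diff:

```go
// Sketch: same NewInformer signature as before, now under the kcache alias.
serviceStore, serviceController := kcache.NewInformer(
	createServiceLW(kubeClient), // ListerWatcher producing Service events
	&kapi.Service{},             // object type the informer tracks
	resyncPeriod,                // periodic full-resync interval
	kcache.ResourceEventHandlerFuncs{
		AddFunc:    ks.newService,
		DeleteFunc: ks.removeService,
		UpdateFunc: ks.updateService,
	},
)
go serviceController.Run(wait.NeverStop) // assumed: run until process exit
_ = serviceStore                         // the returned store doubles as a local cache
```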
@@ -1,13 +1,11 @@
 language: go
+go:
+  - 1.4.3
+  - 1.5.4
+  - 1.6.3
+  - tip
 
 matrix:
-  include:
-    - go: 1.4.3
-      env: NOVET=true # No bundled vet.
-    - go: 1.5.4
-    - go: 1.6.3
-    - go: 1.7
-    - go: tip
   allow_failures:
     - go: tip
 
@@ -18,7 +16,3 @@ before_install:
 script:
   - PATH=$PATH:$PWD/bin go test -v ./...
   - go build
-  - diff -u <(echo -n) <(gofmt -d -s .)
-  - if [ -z $NOVET ]; then
-      diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
-    fi
@@ -663,7 +663,7 @@ command.SetUsageTemplate(s string)
 
 ## PreRun or PostRun Hooks
 
-It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order:
 
 - `PersistentPreRun`
 - `PreRun`
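Both wordings of the README paragraph above describe the same ordering contract. A runnable sketch with a hypothetical command, printing the hooks in the order cobra invokes them:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command illustrating the documented order:
	// PersistentPreRun, PreRun, Run, PostRun, PersistentPostRun.
	rootCmd := &cobra.Command{
		Use: "root",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PersistentPreRun") // runs first, inherited by children
		},
		PreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PreRun")
		},
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("Run")
		},
		PostRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PostRun")
		},
		PersistentPostRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PersistentPostRun") // runs last
		},
	}
	rootCmd.Execute()
}
```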
@@ -11,7 +11,7 @@ import (
 )
 
 const (
-	BashCompFilenameExt     = "cobra_annotation_bash_completion_filename_extensions"
+	BashCompFilenameExt     = "cobra_annotation_bash_completion_filename_extentions"
 	BashCompCustom          = "cobra_annotation_bash_completion_custom"
 	BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
 	BashCompSubdirsInDir    = "cobra_annotation_bash_completion_subdirs_in_dir"
@@ -401,9 +401,11 @@ func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error {
 		format += "="
 	}
 	format += "\")\n"
-	_, err := fmt.Fprintf(w, format, name)
+	if _, err := fmt.Fprintf(w, format, name); err != nil {
 		return err
 	}
+	return nil
+}
 
 func writeFlags(cmd *Command, w io.Writer) error {
 	_, err := fmt.Fprintf(w, ` flags=()
@@ -419,9 +421,6 @@ func writeFlags(cmd *Command, w io.Writer) error {
 	localNonPersistentFlags := cmd.LocalNonPersistentFlags()
 	var visitErr error
 	cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
-		if nonCompletableFlag(flag) {
-			return
-		}
 		if err := writeFlag(flag, w); err != nil {
 			visitErr = err
 			return
@@ -443,9 +442,6 @@ func writeFlags(cmd *Command, w io.Writer) error {
 		return visitErr
 	}
 	cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
-		if nonCompletableFlag(flag) {
-			return
-		}
 		if err := writeFlag(flag, w); err != nil {
 			visitErr = err
 			return
@@ -472,9 +468,6 @@ func writeRequiredFlag(cmd *Command, w io.Writer) error {
 	flags := cmd.NonInheritedFlags()
 	var visitErr error
 	flags.VisitAll(func(flag *pflag.Flag) {
-		if nonCompletableFlag(flag) {
-			return
-		}
 		for key := range flag.Annotations {
 			switch key {
 			case BashCompOneRequiredFlag:
@@ -581,10 +574,6 @@ func (cmd *Command) GenBashCompletion(w io.Writer) error {
 	return postscript(w, cmd.Name())
 }
 
-func nonCompletableFlag(flag *pflag.Flag) bool {
-	return flag.Hidden || len(flag.Deprecated) > 0
-}
-
 func (cmd *Command) GenBashCompletionFile(filename string) error {
 	outFile, err := os.Create(filename)
 	if err != nil {
@@ -37,8 +37,8 @@ var templateFuncs = template.FuncMap{
 
 var initializers []func()
 
-// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
-// Set this to true to enable it.
+// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
+// Set this to true to enable it
 var EnablePrefixMatching = false
 
 //EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
@@ -61,7 +61,9 @@ func AddTemplateFuncs(tmplFuncs template.FuncMap) {
 
 //OnInitialize takes a series of func() arguments and appends them to a slice of func().
 func OnInitialize(y ...func()) {
-	initializers = append(initializers, y...)
+	for _, x := range y {
+		initializers = append(initializers, x)
+	}
 }
 
 //Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
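The two bodies of `OnInitialize` above are equivalent: `append(initializers, y...)` and the explicit loop both queue every callback. A small hedged sketch of typical use, with a hypothetical `initConfig`:

```go
package main

import "github.com/spf13/cobra"

func initConfig() {
	// hypothetical: read config files, bind environment variables, etc.
}

func main() {
	// Either version of OnInitialize queues initConfig to run before
	// any command's Run function executes.
	cobra.OnInitialize(initConfig)
}
```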
@@ -114,7 +116,7 @@ func trimRightSpace(s string) string {
 	return strings.TrimRightFunc(s, unicode.IsSpace)
 }
 
-// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s
 func appendIfNotPresent(s, stringToAppend string) string {
 	if strings.Contains(s, stringToAppend) {
 		return s
@@ -122,7 +124,7 @@ func appendIfNotPresent(s, stringToAppend string) string {
 	return s + " " + stringToAppend
 }
 
-// rpad adds padding to the right of a string.
+//rpad adds padding to the right of a string
 func rpad(s string, padding int) string {
 	template := fmt.Sprintf("%%-%ds", padding)
 	return fmt.Sprintf(template, s)
@@ -136,7 +138,7 @@ func tmpl(w io.Writer, text string, data interface{}) error {
 	return t.Execute(w, data)
 }
 
-// ld compares two strings and returns the levenshtein distance between them.
+// ld compares two strings and returns the levenshtein distance between them
 func ld(s, t string, ignoreCase bool) int {
 	if ignoreCase {
 		s = strings.ToLower(s)
@@ -140,17 +140,17 @@ func (c *Command) SetOutput(output io.Writer) {
 	c.output = &output
 }
 
-// Usage can be defined by application.
+// Usage can be defined by application
 func (c *Command) SetUsageFunc(f func(*Command) error) {
 	c.usageFunc = f
 }
 
-// Can be defined by Application.
+// Can be defined by Application
 func (c *Command) SetUsageTemplate(s string) {
 	c.usageTemplate = s
 }
 
-// Can be defined by Application.
+// Can be defined by Application
 func (c *Command) SetHelpFunc(f func(*Command, []string)) {
 	c.helpFunc = f
 }
@@ -159,7 +159,7 @@ func (c *Command) SetHelpCommand(cmd *Command) {
 	c.helpCommand = cmd
 }
 
-// Can be defined by Application.
+// Can be defined by Application
 func (c *Command) SetHelpTemplate(s string) {
 	c.helpTemplate = s
 }
@@ -195,7 +195,7 @@ func (c *Command) getOut(def io.Writer) io.Writer {
 }
 
 // UsageFunc returns either the function set by SetUsageFunc for this command
-// or a parent, or it returns a default usage function.
+// or a parent, or it returns a default usage function
 func (c *Command) UsageFunc() (f func(*Command) error) {
 	if c.usageFunc != nil {
 		return c.usageFunc
@@ -214,15 +214,15 @@ func (c *Command) UsageFunc() (f func(*Command) error) {
 	}
 }
 
-// Usage puts out the usage for the command.
-// Used when a user provides invalid input.
-// Can be defined by user by overriding UsageFunc.
+// Output the usage for the command
+// Used when a user provides invalid input
+// Can be defined by user by overriding UsageFunc
 func (c *Command) Usage() error {
 	return c.UsageFunc()(c)
 }
 
 // HelpFunc returns either the function set by SetHelpFunc for this command
-// or a parent, or it returns a function with default help behavior.
+// or a parent, or it returns a function with default help behavior
 func (c *Command) HelpFunc() func(*Command, []string) {
 	cmd := c
 	for cmd != nil {
@@ -240,9 +240,9 @@ func (c *Command) HelpFunc() func(*Command, []string) {
 	}
 }
 
-// Help puts out the help for the command.
-// Used when a user calls help [command].
-// Can be defined by user by overriding HelpFunc.
+// Output the help for the command
+// Used when a user calls help [command]
+// Can be defined by user by overriding HelpFunc
 func (c *Command) Help() error {
 	c.HelpFunc()(c, []string{})
 	return nil
@@ -333,7 +333,7 @@ func (c *Command) HelpTemplate() string {
 {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
 }
 
-// Really only used when casting a command to a commander.
+// Really only used when casting a command to a commander
 func (c *Command) resetChildrensParents() {
 	for _, x := range c.commands {
 		x.parent = c
@@ -745,13 +745,13 @@ func (c *Command) initHelpCmd() {
 	c.AddCommand(c.helpCommand)
 }
 
-// Used for testing.
+// Used for testing
 func (c *Command) ResetCommands() {
 	c.commands = nil
 	c.helpCommand = nil
 }
 
-// Sorts commands by their names.
+// Sorts commands by their names
 type commandSorterByName []*Command
 
 func (c commandSorterByName) Len() int { return len(c) }
@@ -831,18 +831,18 @@ main:
 	}
 }
 
-// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set
 func (c *Command) Print(i ...interface{}) {
 	fmt.Fprint(c.OutOrStderr(), i...)
 }
 
-// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set
 func (c *Command) Println(i ...interface{}) {
 	str := fmt.Sprintln(i...)
 	c.Print(str)
 }
 
-// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set
 func (c *Command) Printf(format string, i ...interface{}) {
 	str := fmt.Sprintf(format, i...)
 	c.Print(str)
@@ -859,7 +859,7 @@ func (c *Command) CommandPath() string {
 	return str
 }
 
-// UseLine puts out the full usage for a given command (including parents).
+//The full usage for a given command (including parents)
 func (c *Command) UseLine() string {
 	str := ""
 	if c.HasParent() {
@@ -869,7 +869,7 @@ func (c *Command) UseLine() string {
 }
 
 // For use in determining which flags have been assigned to which commands
-// and which persist.
+// and which persist
 func (c *Command) DebugFlags() {
 	c.Println("DebugFlags called on", c.Name())
 	var debugflags func(*Command)
@@ -944,18 +944,18 @@ func (c *Command) HasExample() bool {
 	return len(c.Example) > 0
 }
 
-// Runnable determines if the command is itself runnable.
+// Runnable determines if the command is itself runnable
 func (c *Command) Runnable() bool {
 	return c.Run != nil || c.RunE != nil
 }
 
-// HasSubCommands determines if the command has children commands.
+// HasSubCommands determines if the command has children commands
 func (c *Command) HasSubCommands() bool {
 	return len(c.commands) > 0
 }
 
 // IsAvailableCommand determines if a command is available as a non-help command
-// (this includes all non deprecated/hidden commands).
+// (this includes all non deprecated/hidden commands)
 func (c *Command) IsAvailableCommand() bool {
 	if len(c.Deprecated) != 0 || c.Hidden {
 		return false
@@ -974,7 +974,7 @@ func (c *Command) IsAvailableCommand() bool {
 
 // IsHelpCommand determines if a command is a 'help' command; a help command is
 // determined by the fact that it is NOT runnable/hidden/deprecated, and has no
-// sub commands that are runnable/hidden/deprecated.
+// sub commands that are runnable/hidden/deprecated
 func (c *Command) IsHelpCommand() bool {
 
 	// if a command is runnable, deprecated, or hidden it is not a 'help' command
@@ -993,9 +993,9 @@ func (c *Command) IsHelpCommand() bool {
 	return true
 }
 
-// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// HasHelpSubCommands determines if a command has any avilable 'help' sub commands
 // that need to be shown in the usage/help default template under 'additional help
-// topics'.
+// topics'
 func (c *Command) HasHelpSubCommands() bool {
 
 	// return true on the first found available 'help' sub command
@@ -1010,7 +1010,7 @@ func (c *Command) HasHelpSubCommands() bool {
 }
 
 // HasAvailableSubCommands determines if a command has available sub commands that
-// need to be shown in the usage/help default template under 'available commands'.
+// need to be shown in the usage/help default template under 'available commands'
 func (c *Command) HasAvailableSubCommands() bool {
 
 	// return true on the first found available (non deprecated/help/hidden)
|
@ -1026,18 +1026,17 @@ func (c *Command) HasAvailableSubCommands() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasParent determines if the command is a child command.
|
// Determine if the command is a child command
|
||||||
func (c *Command) HasParent() bool {
|
func (c *Command) HasParent() bool {
|
||||||
return c.parent != nil
|
return c.parent != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists.
|
// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists
|
||||||
func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
|
func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
|
||||||
return c.globNormFunc
|
return c.globNormFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
// Flage returns the complete FlagSet that applies
|
// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents)
|
||||||
// to this command (local and persistent declared here and by all parents).
|
|
||||||
func (c *Command) Flags() *flag.FlagSet {
|
func (c *Command) Flags() *flag.FlagSet {
|
||||||
if c.flags == nil {
|
if c.flags == nil {
|
||||||
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
||||||
|
@@ -1049,7 +1048,7 @@ func (c *Command) Flags() *flag.FlagSet {
 	return c.flags
 }
 
-// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands
 func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
 	persistentFlags := c.PersistentFlags()
 
@@ -1062,7 +1061,7 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
 	return out
 }
 
-// LocalFlags returns the local FlagSet specifically set in the current command.
+// Get the local FlagSet specifically set in the current command
 func (c *Command) LocalFlags() *flag.FlagSet {
 	c.mergePersistentFlags()
 
@@ -1080,7 +1079,7 @@ func (c *Command) LocalFlags() *flag.FlagSet {
 	return local
 }
 
-// InheritedFlags returns all flags which were inherited from parents commands.
+// All Flags which were inherited from parents commands
 func (c *Command) InheritedFlags() *flag.FlagSet {
 	c.mergePersistentFlags()
 
@@ -1109,12 +1108,12 @@ func (c *Command) InheritedFlags() *flag.FlagSet {
 	return inherited
 }
 
-// NonInheritedFlags returns all flags which were not inherited from parent commands.
+// All Flags which were not inherited from parent commands
 func (c *Command) NonInheritedFlags() *flag.FlagSet {
 	return c.LocalFlags()
 }
 
-// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+// Get the Persistent FlagSet specifically set in the current command
 func (c *Command) PersistentFlags() *flag.FlagSet {
 	if c.pflags == nil {
 		c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
@@ -1126,7 +1125,7 @@ func (c *Command) PersistentFlags() *flag.FlagSet {
 	return c.pflags
 }
 
-// ResetFlags is used in testing.
+// For use in testing
 func (c *Command) ResetFlags() {
 	c.flagErrorBuf = new(bytes.Buffer)
 	c.flagErrorBuf.Reset()
@@ -1136,50 +1135,50 @@ func (c *Command) ResetFlags() {
 	c.pflags.SetOutput(c.flagErrorBuf)
 }
 
-// Does the command contain any flags (local plus persistent from the entire structure).
+// Does the command contain any flags (local plus persistent from the entire structure)
 func (c *Command) HasFlags() bool {
 	return c.Flags().HasFlags()
 }
 
-// Does the command contain persistent flags.
+// Does the command contain persistent flags
 func (c *Command) HasPersistentFlags() bool {
 	return c.PersistentFlags().HasFlags()
 }
 
-// Does the command has flags specifically declared locally.
+// Does the command has flags specifically declared locally
 func (c *Command) HasLocalFlags() bool {
 	return c.LocalFlags().HasFlags()
 }
 
-// Does the command have flags inherited from its parent command.
+// Does the command have flags inherited from its parent command
 func (c *Command) HasInheritedFlags() bool {
 	return c.InheritedFlags().HasFlags()
 }
 
 // Does the command contain any flags (local plus persistent from the entire
-// structure) which are not hidden or deprecated.
+// structure) which are not hidden or deprecated
 func (c *Command) HasAvailableFlags() bool {
 	return c.Flags().HasAvailableFlags()
 }
 
-// Does the command contain persistent flags which are not hidden or deprecated.
+// Does the command contain persistent flags which are not hidden or deprecated
 func (c *Command) HasAvailablePersistentFlags() bool {
 	return c.PersistentFlags().HasAvailableFlags()
 }
 
 // Does the command has flags specifically declared locally which are not hidden
-// or deprecated.
+// or deprecated
 func (c *Command) HasAvailableLocalFlags() bool {
 	return c.LocalFlags().HasAvailableFlags()
 }
 
 // Does the command have flags inherited from its parent command which are
-// not hidden or deprecated.
+// not hidden or deprecated
 func (c *Command) HasAvailableInheritedFlags() bool {
 	return c.InheritedFlags().HasAvailableFlags()
 }
 
-// Flag climbs up the command tree looking for matching flag.
+// Flag climbs up the command tree looking for matching flag
 func (c *Command) Flag(name string) (flag *flag.Flag) {
 	flag = c.Flags().Lookup(name)
 
@@ -1190,7 +1189,7 @@ func (c *Command) Flag(name string) (flag *flag.Flag) {
 	return
 }
 
-// Recursively find matching persistent flag.
+// recursively find matching persistent flag
 func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
 	if c.HasPersistentFlags() {
 		flag = c.PersistentFlags().Lookup(name)
@@ -1202,7 +1201,7 @@ func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
 	return
 }
 
-// ParseFlags parses persistent flag tree and local flags.
+// ParseFlags parses persistent flag tree & local flags
 func (c *Command) ParseFlags(args []string) (err error) {
 	if c.DisableFlagParsing {
 		return nil
@@ -1212,7 +1211,7 @@ func (c *Command) ParseFlags(args []string) (err error) {
 	return
 }
 
-// Parent returns a commands parent command.
+// Parent returns a commands parent command
 func (c *Command) Parent() *Command {
 	return c.parent
 }
@@ -5,12 +5,8 @@ language: go
 go:
   - 1.5.4
   - 1.6.3
-  - 1.7
   - tip
 
-matrix:
-  allow_failures:
-    - go: tip
 install:
   - go get github.com/golang/lint/golint
   - export PATH=$GOPATH/bin:$PATH
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // optional interface to indicate boolean flags that can be
 // supplied without "=value" text
@@ -27,7 +30,7 @@ func (b *boolValue) Type() string {
 	return "bool"
 }
 
-func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
 
 func (b *boolValue) IsBoolFlag() bool { return true }
 
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- count Value
 type countValue int
@@ -25,7 +28,7 @@ func (i *countValue) Type() string {
 	return "count"
 }
 
-func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+func (i *countValue) String() string { return fmt.Sprintf("%v", *i) }
 
 func countConv(sval string) (interface{}, error) {
 	i, err := strconv.Atoi(sval)
@@ -419,25 +419,10 @@ func (f *FlagSet) PrintDefaults() {
 	fmt.Fprintf(f.out(), "%s", usages)
 }
 
-// defaultIsZeroValue returns true if the default value for this flag represents
-// a zero value.
-func (f *Flag) defaultIsZeroValue() bool {
-	switch f.Value.(type) {
-	case boolFlag:
-		return f.DefValue == "false"
-	case *durationValue:
-		// Beginning in Go 1.7, duration zero values are "0s"
-		return f.DefValue == "0" || f.DefValue == "0s"
-	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
-		return f.DefValue == "0"
-	case *stringValue:
-		return f.DefValue == ""
-	case *ipValue, *ipMaskValue, *ipNetValue:
-		return f.DefValue == "<nil>"
-	case *intSliceValue, *stringSliceValue, *stringArrayValue:
-		return f.DefValue == "[]"
-	default:
-		switch f.Value.String() {
+// isZeroValue guesses whether the string represents the zero
+// value for a flag. It is not accurate but in practice works OK.
+func isZeroValue(value string) bool {
+	switch value {
 	case "false":
 		return true
 	case "<nil>":
@@ -449,7 +434,6 @@ func (f *Flag) defaultIsZeroValue() bool {
 	}
 	return false
 }
-}
 
 // UnquoteUsage extracts a back-quoted name from the usage
 // string for a flag and returns it and the un-quoted usage.
@@ -471,19 +455,22 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
 			break // Only one back quote; use type name.
 		}
 	}
-	name = flag.Value.Type()
-	switch name {
-	case "bool":
+	// No explicit name, so use type if we can find one.
+	name = "value"
+	switch flag.Value.(type) {
+	case boolFlag:
 		name = ""
-	case "float64":
+	case *durationValue:
+		name = "duration"
+	case *float64Value:
 		name = "float"
-	case "int64":
+	case *intValue, *int64Value:
 		name = "int"
-	case "uint64":
+	case *stringValue:
+		name = "string"
+	case *uintValue, *uint64Value:
 		name = "uint"
 	}
 
 	return
 }
 
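Both versions keep the same external contract: a back-quoted word in a usage string wins, and the type-derived placeholder (the part rewritten above) is only a fallback. A small sketch against pflag's exported `UnquoteUsage`:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.String("output", "", "write results to `dir`")

	// UnquoteUsage pulls the back-quoted word out as the placeholder name;
	// only flags without one fall through to the type switch changed above.
	name, usage := pflag.UnquoteUsage(fs.Lookup("output"))
	fmt.Println(name)  // dir
	fmt.Println(usage) // write results to dir
}
```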
@@ -532,7 +519,7 @@ func (f *FlagSet) FlagUsages() string {
 		}
 
 		line += usage
-		if !flag.defaultIsZeroValue() {
+		if !isZeroValue(flag.DefValue) {
 			if flag.Value.Type() == "string" {
 				line += fmt.Sprintf(" (default %q)", flag.DefValue)
 			} else {
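This call site is why the helper exists at all: `FlagUsages` suppresses the `(default ...)` suffix whenever the default is a zero value. A hedged sketch of the visible effect:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Int("retries", 3, "number of retries")
	fs.Int("limit", 0, "result limit")

	// Only --retries gets a "(default 3)" suffix; --limit's zero default
	// is filtered out by the isZeroValue check above.
	fmt.Print(fs.FlagUsages())
}
```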
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- float32 Value
 type float32Value float32
@@ -20,7 +23,7 @@ func (f *float32Value) Type() string {
 	return "float32"
 }
 
-func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
 
 func float32Conv(sval string) (interface{}, error) {
 	v, err := strconv.ParseFloat(sval, 32)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- float64 Value
 type float64Value float64
@@ -20,7 +23,7 @@ func (f *float64Value) Type() string {
 	return "float64"
 }
 
-func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
 
 func float64Conv(sval string) (interface{}, error) {
 	return strconv.ParseFloat(sval, 64)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- int Value
 type intValue int
@@ -20,7 +23,7 @@ func (i *intValue) Type() string {
 	return "int"
 }
 
-func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
 
 func intConv(sval string) (interface{}, error) {
 	return strconv.Atoi(sval)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- int32 Value
 type int32Value int32
@@ -20,7 +23,7 @@ func (i *int32Value) Type() string {
 	return "int32"
 }
 
-func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
 
 func int32Conv(sval string) (interface{}, error) {
 	v, err := strconv.ParseInt(sval, 0, 32)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- int64 Value
 type int64Value int64
@@ -20,7 +23,7 @@ func (i *int64Value) Type() string {
 	return "int64"
 }
 
-func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
 
 func int64Conv(sval string) (interface{}, error) {
 	return strconv.ParseInt(sval, 0, 64)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- int8 Value
 type int8Value int8
@@ -20,7 +23,7 @@ func (i *int8Value) Type() string {
 	return "int8"
 }
 
-func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) }
 
 func int8Conv(sval string) (interface{}, error) {
 	v, err := strconv.ParseInt(sval, 0, 8)
@@ -1,5 +1,7 @@
 package pflag
 
+import "fmt"
+
 // -- string Value
 type stringValue string
 
@@ -16,7 +18,7 @@ func (s *stringValue) Type() string {
 	return "string"
 }
 
-func (s *stringValue) String() string { return string(*s) }
+func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
 
 func stringConv(sval string) (interface{}, error) {
 	return sval, nil
@@ -1,110 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"strings"
-)
-
-var _ = fmt.Fprint
-
-// -- stringArray Value
-type stringArrayValue struct {
-	value   *[]string
-	changed bool
-}
-
-func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
-	ssv := new(stringArrayValue)
-	ssv.value = p
-	*ssv.value = val
-	return ssv
-}
-
-func (s *stringArrayValue) Set(val string) error {
-	if !s.changed {
-		*s.value = []string{val}
-		s.changed = true
-	} else {
-		*s.value = append(*s.value, val)
-	}
-	return nil
-}
-
-func (s *stringArrayValue) Type() string {
-	return "stringArray"
-}
-
-func (s *stringArrayValue) String() string {
-	str, _ := writeAsCSV(*s.value)
-	return "[" + str + "]"
-}
-
-func stringArrayConv(sval string) (interface{}, error) {
-	sval = strings.Trim(sval, "[]")
-	// An empty string would cause a array with one (empty) string
-	if len(sval) == 0 {
-		return []string{}, nil
-	}
-	return readAsCSV(sval)
-}
-
-// GetStringArray return the []string value of a flag with the given name
-func (f *FlagSet) GetStringArray(name string) ([]string, error) {
-	val, err := f.getFlagType(name, "stringArray", stringArrayConv)
-	if err != nil {
-		return []string{}, err
-	}
-	return val.([]string), nil
-}
-
-// StringArrayVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a []string variable in which to store the values of the multiple flags.
-// The value of each argument will not try to be separated by comma
-func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
-	f.VarP(newStringArrayValue(value, p), name, "", usage)
-}
-
-// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
-	f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
-}
-
-// StringArrayVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a []string variable in which to store the value of the flag.
-// The value of each argument will not try to be separated by comma
-func StringArrayVar(p *[]string, name string, value []string, usage string) {
-	CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
-}
-
-// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
-func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
-	CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
-}
-
-// StringArray defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a []string variable that stores the value of the flag.
-// The value of each argument will not try to be separated by comma
-func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
-	p := []string{}
-	f.StringArrayVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
-	p := []string{}
-	f.StringArrayVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// StringArray defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a []string variable that stores the value of the flag.
-// The value of each argument will not try to be separated by comma
-func StringArray(name string, value []string, usage string) *[]string {
-	return CommandLine.StringArrayP(name, "", value, usage)
-}
-
-// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
-func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
-	return CommandLine.StringArrayP(name, shorthand, value, usage)
-}
@@ -1,7 +1,6 @@
 package pflag
 
 import (
-	"bytes"
 	"encoding/csv"
 	"fmt"
 	"strings"
@@ -22,28 +21,10 @@ func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
 	return ssv
 }
 
-func readAsCSV(val string) ([]string, error) {
-	if val == "" {
-		return []string{}, nil
-	}
+func (s *stringSliceValue) Set(val string) error {
 	stringReader := strings.NewReader(val)
 	csvReader := csv.NewReader(stringReader)
-	return csvReader.Read()
-}
-
-func writeAsCSV(vals []string) (string, error) {
-	b := &bytes.Buffer{}
-	w := csv.NewWriter(b)
-	err := w.Write(vals)
-	if err != nil {
-		return "", err
-	}
-	w.Flush()
-	return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil
-}
-
-func (s *stringSliceValue) Set(val string) error {
-	v, err := readAsCSV(val)
+	v, err := csvReader.Read()
 	if err != nil {
 		return err
 	}
@@ -60,10 +41,7 @@ func (s *stringSliceValue) Type() string {
 	return "stringSlice"
 }
 
-func (s *stringSliceValue) String() string {
-	str, _ := writeAsCSV(*s.value)
-	return "[" + str + "]"
-}
+func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" }
 
 func stringSliceConv(sval string) (interface{}, error) {
 	sval = strings.Trim(sval, "[]")
@@ -71,7 +49,8 @@ func stringSliceConv(sval string) (interface{}, error) {
 	if len(sval) == 0 {
 		return []string{}, nil
 	}
-	return readAsCSV(sval)
+	v := strings.Split(sval, ",")
+	return v, nil
 }
 
 // GetStringSlice return the []string value of a flag with the given name
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- uint Value
 type uintValue uint
@@ -20,7 +23,7 @@ func (i *uintValue) Type() string {
 	return "uint"
 }
 
-func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
 
 func uintConv(sval string) (interface{}, error) {
 	v, err := strconv.ParseUint(sval, 0, 0)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- uint16 value
 type uint16Value uint16
@@ -9,7 +12,7 @@ func newUint16Value(val uint16, p *uint16) *uint16Value {
 	*p = val
 	return (*uint16Value)(p)
 }
-
+func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
 func (i *uint16Value) Set(s string) error {
 	v, err := strconv.ParseUint(s, 0, 16)
 	*i = uint16Value(v)
@@ -20,8 +23,6 @@ func (i *uint16Value) Type() string {
 	return "uint16"
 }
 
-func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
 func uint16Conv(sval string) (interface{}, error) {
 	v, err := strconv.ParseUint(sval, 0, 16)
 	if err != nil {
@@ -1,15 +1,18 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
-// -- uint32 value
+// -- uint16 value
 type uint32Value uint32
 
 func newUint32Value(val uint32, p *uint32) *uint32Value {
 	*p = val
 	return (*uint32Value)(p)
 }
-
+func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
 func (i *uint32Value) Set(s string) error {
 	v, err := strconv.ParseUint(s, 0, 32)
 	*i = uint32Value(v)
|
@ -20,8 +23,6 @@ func (i *uint32Value) Type() string {
|
||||||
return "uint32"
|
return "uint32"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
|
|
||||||
|
|
||||||
func uint32Conv(sval string) (interface{}, error) {
|
func uint32Conv(sval string) (interface{}, error) {
|
||||||
v, err := strconv.ParseUint(sval, 0, 32)
|
v, err := strconv.ParseUint(sval, 0, 32)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- uint64 Value
 type uint64Value uint64
@@ -20,7 +23,7 @@ func (i *uint64Value) Type() string {
 	return "uint64"
 }
 
-func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
 
 func uint64Conv(sval string) (interface{}, error) {
 	v, err := strconv.ParseUint(sval, 0, 64)
@@ -1,6 +1,9 @@
 package pflag
 
-import "strconv"
+import (
+	"fmt"
+	"strconv"
+)
 
 // -- uint8 Value
 type uint8Value uint8
@@ -20,7 +23,7 @@ func (i *uint8Value) Type() string {
 	return "uint8"
 }
 
-func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) }
 
 func uint8Conv(sval string) (interface{}, error) {
 	v, err := strconv.ParseUint(sval, 0, 8)
@@ -43,7 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/apiserver/authenticator"
 	"k8s.io/kubernetes/pkg/capabilities"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 	"k8s.io/kubernetes/pkg/genericapiserver"
 	"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
@ -140,6 +140,28 @@ func Run(s *options.APIServer) error {
|
||||||
glog.Fatalf("Failed to start kubelet client: %v", err)
|
glog.Fatalf("Failed to start kubelet client: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s.StorageConfig.DeserializationCacheSize == 0 {
|
||||||
|
// When size of cache is not explicitly set, estimate its size based on
|
||||||
|
// target memory usage.
|
||||||
|
glog.V(2).Infof("Initalizing deserialization cache size based on %dMB limit", s.TargetRAMMB)
|
||||||
|
|
||||||
|
// This is the heuristics that from memory capacity is trying to infer
|
||||||
|
// the maximum number of nodes in the cluster and set cache sizes based
|
||||||
|
// on that value.
|
||||||
|
// From our documentation, we officially recomment 120GB machines for
|
||||||
|
// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
|
||||||
|
// capacity per node.
|
||||||
|
// TODO: We may consider deciding that some percentage of memory will
|
||||||
|
// be used for the deserialization cache and divide it by the max object
|
||||||
|
// size to compute its size. We may even go further and measure
|
||||||
|
// collective sizes of the objects in the cache.
|
||||||
|
clusterSize := s.TargetRAMMB / 60
|
||||||
|
s.StorageConfig.DeserializationCacheSize = 25 * clusterSize
|
||||||
|
if s.StorageConfig.DeserializationCacheSize < 1000 {
|
||||||
|
s.StorageConfig.DeserializationCacheSize = 1000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
|
storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Fatalf("error generating storage version map: %s", err)
|
glog.Fatalf("error generating storage version map: %s", err)
|
||||||
|
|
|
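
The added block computes the deserialization cache size from the apiserver's memory target: roughly one node per 60MB, 25 cache entries per inferred node, floored at 1000. A standalone sketch of just that arithmetic, assuming the same constants as the hunk:

    package main

    import "fmt"

    // deserializationCacheSize mirrors the heuristic in the hunk above:
    // infer cluster size from memory (~60MB per node), then allow 25
    // cached deserialized objects per node, with a floor of 1000.
    func deserializationCacheSize(targetRAMMB int) int {
        clusterSize := targetRAMMB / 60
        size := 25 * clusterSize
        if size < 1000 {
            size = 1000
        }
        return size
    }

    func main() {
        // 120GB machine -> ~2000 inferred nodes -> 50000 entries;
        // a small 2GB apiserver falls back to the floor.
        fmt.Println(deserializationCacheSize(120 * 1000)) // 50000
        fmt.Println(deserializationCacheSize(2 * 1000))   // 1000
    }
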
@@ -51,9 +51,9 @@ import (
 	"k8s.io/kubernetes/pkg/controller/deployment"
 	"k8s.io/kubernetes/pkg/controller/disruption"
 	endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/controller/job"
 	namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
 	nodecontroller "k8s.io/kubernetes/pkg/controller/node"
@@ -85,9 +85,12 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
 	fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
 	fs.Int32Var(&s.ConntrackMax, "conntrack-max", s.ConntrackMax,
-		"Maximum number of NAT connections to track (0 to leave as-is).")
+		"Maximum number of NAT connections to track (0 to leave as-is). This overrides conntrack-max-per-core and conntrack-min.")
+	fs.MarkDeprecated("conntrack-max", "This feature will be removed in a later release.")
 	fs.Int32Var(&s.ConntrackMaxPerCore, "conntrack-max-per-core", s.ConntrackMaxPerCore,
-		"Maximum number of NAT connections to track per CPU core (0 to leave as-is). This is only considered if conntrack-max is 0.")
+		"Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).")
+	fs.Int32Var(&s.ConntrackMin, "conntrack-min", s.ConntrackMin,
+		"Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).")
 	fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
 	config.DefaultFeatureGate.AddFlag(fs)
 }
@@ -335,13 +335,22 @@ func (s *ProxyServer) Run() error {
 }
 
 func getConntrackMax(config *options.ProxyServerConfig) (int, error) {
-	if config.ConntrackMax > 0 && config.ConntrackMaxPerCore > 0 {
-		return -1, fmt.Errorf("invalid config: ConntrackMax and ConntrackMaxPerCore are mutually exclusive")
-	}
 	if config.ConntrackMax > 0 {
+		if config.ConntrackMaxPerCore > 0 {
+			return -1, fmt.Errorf("invalid config: ConntrackMax and ConntrackMaxPerCore are mutually exclusive")
+		}
+		glog.V(3).Infof("getConntrackMax: using absolute conntrax-max (deprecated)")
 		return int(config.ConntrackMax), nil
-	} else if config.ConntrackMaxPerCore > 0 {
-		return (int(config.ConntrackMaxPerCore) * runtime.NumCPU()), nil
+	}
+	if config.ConntrackMaxPerCore > 0 {
+		floor := int(config.ConntrackMin)
+		scaled := int(config.ConntrackMaxPerCore) * runtime.NumCPU()
+		if scaled > floor {
+			glog.V(3).Infof("getConntrackMax: using scaled conntrax-max-per-core")
+			return scaled, nil
+		}
+		glog.V(3).Infof("getConntrackMax: using conntrax-min")
+		return floor, nil
 	}
 	return 0, nil
 }
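
getConntrackMax now encodes a strict precedence: an explicit (deprecated) conntrack-max wins and conflicts with the per-core flag; otherwise the per-core value scaled by CPU count applies, but never below conntrack-min; otherwise zero means leave the kernel table alone. A self-contained sketch of that decision with a hypothetical trimmed-down config struct:

    package main

    import (
        "errors"
        "fmt"
        "runtime"
    )

    // conntrackConfig is a hypothetical, trimmed-down stand-in for the
    // kube-proxy options touched in the hunk above.
    type conntrackConfig struct {
        Max        int32 // deprecated absolute cap; overrides the others
        MaxPerCore int32 // scaled by CPU count
        Min        int32 // floor applied to the scaled value
    }

    // effectiveMax mirrors getConntrackMax: 0 means "leave the kernel
    // setting as-is".
    func effectiveMax(c conntrackConfig) (int, error) {
        if c.Max > 0 {
            if c.MaxPerCore > 0 {
                return -1, errors.New("Max and MaxPerCore are mutually exclusive")
            }
            return int(c.Max), nil
        }
        if c.MaxPerCore > 0 {
            scaled := int(c.MaxPerCore) * runtime.NumCPU()
            if floor := int(c.Min); scaled <= floor {
                return floor, nil
            }
            return scaled, nil
        }
        return 0, nil
    }

    func main() {
        // With the defaults this diff introduces (32k per core, 128k min),
        // small machines hit the floor and larger ones get the scaled value.
        n, _ := effectiveMax(conntrackConfig{MaxPerCore: 32 * 1024, Min: 128 * 1024})
        fmt.Printf("%d cores -> %d entries\n", runtime.NumCPU(), n)
    }
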
@@ -17,7 +17,7 @@ limitations under the License.
 package admission
 
 import (
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 )
 
 // PluginInitializer is used for initialization of shareable resources between admission plugins.
@@ -17,7 +17,7 @@ limitations under the License.
 package admission
 
 import (
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 )
 
 // Validator holds Validate functions, which are responsible for validation of initialized shared resources
@@ -2568,6 +2568,9 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
 		allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...)
 	}
 
+	// TODO(freehan): allow user to update loadbalancerSourceRanges
+	allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
+
 	allErrs = append(allErrs, ValidateService(service)...)
 	return allErrs
 }
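
ValidateImmutableField, called twice above, is assumed here to simply compare the old and new values and emit one error when they differ; the real helper lives elsewhere in the validation package. A hedged generic sketch of that behavior:

    package main

    import (
        "fmt"
        "reflect"
    )

    // validateImmutable is a hedged re-implementation of what
    // ValidateImmutableField is assumed to do: reject any change to a
    // field that must stay fixed across updates, identified by its path.
    func validateImmutable(newVal, oldVal interface{}, path string) []error {
        var errs []error
        if !reflect.DeepEqual(oldVal, newVal) {
            errs = append(errs, fmt.Errorf("%s: field is immutable", path))
        }
        return errs
    }

    func main() {
        // The hunk above adds exactly this kind of check for
        // spec.loadBalancerSourceRanges on Service updates.
        oldRanges := []string{"10.0.0.0/8"}
        newRanges := []string{"10.0.0.0/8", "192.168.0.0/16"}
        for _, err := range validateImmutable(newRanges, oldRanges, "spec.loadBalancerSourceRanges") {
            fmt.Println(err)
        }
    }
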
File diff suppressed because it is too large
@@ -66,12 +66,14 @@ type KubeProxyConfiguration struct {
 	// Must be greater than 0. Only applicable for proxyMode=userspace.
 	UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
 	// conntrackMax is the maximum number of NAT connections to track (0 to
-	// leave as-is). This takes precedence over conntrackMaxPerCore.
+	// leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin.
 	ConntrackMax int32 `json:"conntrackMax"`
 	// conntrackMaxPerCore is the maximum number of NAT connections to track
-	// per CPU core (0 to leave as-is). This value is only considered if
-	// conntrackMax == 0.
+	// per CPU core (0 to leave the limit as-is and ignore conntrackMin).
 	ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"`
+	// conntrackMin is the minimum value of connect-tracking records to allocate,
+	// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
+	ConntrackMin int32 `json:"conntrackMin"`
 	// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
 	// (e.g. '250ms', '2s'). Must be greater than 0.
 	ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
@@ -89,6 +89,9 @@ func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
 		if obj.ConntrackMaxPerCore == 0 {
 			obj.ConntrackMaxPerCore = 32 * 1024
 		}
+		if obj.ConntrackMin == 0 {
+			obj.ConntrackMin = 128 * 1024
+		}
 	}
 	if obj.IPTablesMasqueradeBit == nil {
 		temp := int32(14)
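
The defaulting hunk only fills ConntrackMin (128k) and ConntrackMaxPerCore (32k) when the operator left them at zero, and, as the surrounding context lines suggest, only inside a guard that skips defaulting when an absolute ConntrackMax was set. A sketch of that idiom with a simplified local struct, not the real KubeProxyConfiguration:

    package main

    import "fmt"

    // proxyDefaults is a simplified local stand-in for the
    // KubeProxyConfiguration fields defaulted in the hunk above.
    type proxyDefaults struct {
        ConntrackMax        int32
        ConntrackMaxPerCore int32
        ConntrackMin        int32
    }

    // setDefaults mirrors the pattern above: zero means "unset", and the
    // per-core/min defaults are assumed to apply only when no absolute
    // max is given.
    func setDefaults(obj *proxyDefaults) {
        if obj.ConntrackMax == 0 {
            if obj.ConntrackMaxPerCore == 0 {
                obj.ConntrackMaxPerCore = 32 * 1024
            }
            if obj.ConntrackMin == 0 {
                obj.ConntrackMin = 128 * 1024
            }
        }
    }

    func main() {
        cfg := proxyDefaults{}
        setDefaults(&cfg)
        fmt.Println(cfg.ConntrackMaxPerCore, cfg.ConntrackMin) // 32768 131072

        // An explicit absolute max suppresses the other defaults entirely.
        legacy := proxyDefaults{ConntrackMax: 500000}
        setDefaults(&legacy)
        fmt.Println(legacy.ConntrackMaxPerCore, legacy.ConntrackMin) // 0 0
    }
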
@@ -63,12 +63,14 @@ type KubeProxyConfiguration struct {
 	// Must be greater than 0. Only applicable for proxyMode=userspace.
 	UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
 	// conntrackMax is the maximum number of NAT connections to track (0 to
-	// leave as-is). This takes precedence over conntrackMaxPerCore.
+	// leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin.
 	ConntrackMax int32 `json:"conntrackMax"`
 	// conntrackMaxPerCore is the maximum number of NAT connections to track
-	// per CPU core (0 to leave as-is). This value is only considered if
-	// conntrackMax == 0.
+	// per CPU core (0 to leave the limit as-is and ignore conntrackMin).
 	ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"`
+	// conntrackMin is the minimum value of connect-tracking records to allocate,
+	// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
+	ConntrackMin int32 `json:"conntrackMin"`
 	// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
 	// (e.g. '250ms', '2s'). Must be greater than 0.
 	ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
@@ -69,6 +69,7 @@ func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyCon
 	out.UDPIdleTimeout = in.UDPIdleTimeout
 	out.ConntrackMax = in.ConntrackMax
 	out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
+	out.ConntrackMin = in.ConntrackMin
 	out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
 	return nil
 }

@@ -98,6 +99,7 @@ func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyCon
 	out.UDPIdleTimeout = in.UDPIdleTimeout
 	out.ConntrackMax = in.ConntrackMax
 	out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
+	out.ConntrackMin = in.ConntrackMin
 	out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
 	return nil
 }
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go (generated, vendored)
@@ -75,6 +75,7 @@ func DeepCopy_v1alpha1_KubeProxyConfiguration(in interface{}, out interface{}, c
 	out.UDPIdleTimeout = in.UDPIdleTimeout
 	out.ConntrackMax = in.ConntrackMax
 	out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
+	out.ConntrackMin = in.ConntrackMin
 	out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
 	return nil
 }

@@ -162,6 +162,7 @@ func DeepCopy_componentconfig_KubeProxyConfiguration(in interface{}, out interfa
 	out.UDPIdleTimeout = in.UDPIdleTimeout
 	out.ConntrackMax = in.ConntrackMax
 	out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
+	out.ConntrackMin = in.ConntrackMin
 	out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
 	return nil
 }
@@ -24,6 +24,7 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/golang/glog"
 	"github.com/pborman/uuid"
 
 	"k8s.io/kubernetes/pkg/apiserver"

@@ -39,7 +40,11 @@ type auditResponseWriter struct {
 }
 
 func (a *auditResponseWriter) WriteHeader(code int) {
-	fmt.Fprintf(a.out, "%s AUDIT: id=%q response=\"%d\"\n", time.Now().Format(time.RFC3339Nano), a.id, code)
+	line := fmt.Sprintf("%s AUDIT: id=%q response=\"%d\"\n", time.Now().Format(time.RFC3339Nano), a.id, code)
+	if _, err := fmt.Fprint(a.out, line); err != nil {
+		glog.Errorf("Unable to write audit log: %s, the error is: %v", line, err)
+	}
+
 	a.ResponseWriter.WriteHeader(code)
 }

@@ -92,8 +97,11 @@ func WithAudit(handler http.Handler, attributeGetter apiserver.RequestAttributeG
 		}
 		id := uuid.NewRandom().String()
 
-		fmt.Fprintf(out, "%s AUDIT: id=%q ip=%q method=%q user=%q as=%q namespace=%q uri=%q\n",
+		line := fmt.Sprintf("%s AUDIT: id=%q ip=%q method=%q user=%q as=%q namespace=%q uri=%q\n",
 			time.Now().Format(time.RFC3339Nano), id, utilnet.GetClientIP(req), req.Method, attribs.GetUser().GetName(), asuser, namespace, req.URL)
+		if _, err := fmt.Fprint(out, line); err != nil {
+			glog.Errorf("Unable to write audit log: %s, the error is: %v", line, err)
+		}
 		respWriter := decorateResponseWriter(w, out, id)
 		handler.ServeHTTP(respWriter, req)
 	})
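
Both audit hunks switch from fire-and-forget fmt.Fprintf to building the line first and surfacing write errors. The decorator shape itself is worth seeing in isolation; a minimal runnable sketch (the sink and field names are illustrative, not the real apiserver types):

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"
        "net/http/httptest"
        "os"
        "time"
    )

    // auditWriter decorates an http.ResponseWriter: it records the status
    // code to an audit sink before delegating, as the hunk above does.
    type auditWriter struct {
        http.ResponseWriter
        out io.Writer
        id  string
    }

    func (a *auditWriter) WriteHeader(code int) {
        line := fmt.Sprintf("%s AUDIT: id=%q response=%q\n",
            time.Now().Format(time.RFC3339Nano), a.id, fmt.Sprint(code))
        // The change above: surface write errors instead of dropping them.
        if _, err := fmt.Fprint(a.out, line); err != nil {
            log.Printf("unable to write audit log: %s: %v", line, err)
        }
        a.ResponseWriter.WriteHeader(code)
    }

    func main() {
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusTeapot)
        })
        rec := httptest.NewRecorder()
        h.ServeHTTP(&auditWriter{ResponseWriter: rec, out: os.Stdout, id: "demo"},
            httptest.NewRequest("GET", "/", nil))
        fmt.Println("status:", rec.Code) // status: 418
    }
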
@@ -43,7 +43,8 @@ func init() {
 
 // NewRequestAuthenticator creates an http handler that tries to authenticate the given request as a user, and then
 // stores any such user found onto the provided context for the request. If authentication fails or returns an error
-// the failed handler is used. On success, handler is invoked to serve the request.
+// the failed handler is used. On success, "Authorization" header is removed from the request and handler
+// is invoked to serve the request.
 func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator.Request, failed http.Handler, handler http.Handler) (http.Handler, error) {
 	return api.NewRequestContextFilter(
 		mapper,

@@ -57,6 +58,9 @@ func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator
 				return
 			}
 
+			// authorization header is not required anymore in case of a successful authentication.
+			req.Header.Del("Authorization")
+
 			if ctx, ok := mapper.Get(req); ok {
 				mapper.Update(req, api.WithUser(ctx, user))
 			}
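
The net effect of this change is that downstream handlers never observe the caller's credentials once authentication has succeeded. A toy middleware showing the same idea, with a static bearer-token check standing in for authenticator.Request:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    // withAuth is a toy version of the filter above: authenticate, then
    // drop the Authorization header before invoking the next handler.
    func withAuth(next http.Handler, validToken string) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.Header.Get("Authorization") != "Bearer "+validToken {
                http.Error(w, "unauthorized", http.StatusUnauthorized)
                return
            }
            // Mirrors req.Header.Del("Authorization") in the hunk above:
            // the credential is not needed anymore after a successful check.
            r.Header.Del("Authorization")
            next.ServeHTTP(w, r)
        })
    }

    func main() {
        next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // Downstream no longer observes the credential.
            fmt.Fprintf(w, "auth header seen downstream: %q", r.Header.Get("Authorization"))
        })
        req := httptest.NewRequest("GET", "/", nil)
        req.Header.Set("Authorization", "Bearer s3cret")
        rec := httptest.NewRecorder()
        withAuth(next, "s3cret").ServeHTTP(rec, req)
        fmt.Println(rec.Body.String()) // auth header seen downstream: ""
    }
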
@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package framework
+package cache
 
 import (
 	"sync"
 	"time"
 
-	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/runtime"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 	"k8s.io/kubernetes/pkg/util/wait"

@@ -28,13 +27,13 @@ import (
 
 // Config contains all the settings for a Controller.
 type Config struct {
-	// The queue for your objects; either a cache.FIFO or
-	// a cache.DeltaFIFO. Your Process() function should accept
+	// The queue for your objects; either a FIFO or
+	// a DeltaFIFO. Your Process() function should accept
 	// the output of this Oueue's Pop() method.
-	cache.Queue
+	Queue
 
 	// Something that can list and watch your objects.
-	cache.ListerWatcher
+	ListerWatcher
 
 	// Something that can process your objects.
 	Process ProcessFunc

@@ -45,7 +44,7 @@ type Config struct {
 	// Reprocess everything at least this often.
 	// Note that if it takes longer for you to clear the queue than this
 	// period, you will end up processing items in the order determined
-	// by cache.FIFO.Replace(). Currently, this is random. If this is a
+	// by FIFO.Replace(). Currently, this is random. If this is a
 	// problem, we can change that replacement policy to append new
 	// things to the end of the queue instead of replacing the entire
 	// queue.

@@ -64,7 +63,7 @@ type ProcessFunc func(obj interface{}) error
 // Controller is a generic controller framework.
 type Controller struct {
 	config         Config
-	reflector      *cache.Reflector
+	reflector      *Reflector
 	reflectorMutex sync.RWMutex
 }
 

@@ -87,7 +86,7 @@ func New(c *Config) *Controller {
 // Run blocks; call via go.
 func (c *Controller) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
-	r := cache.NewReflector(
+	r := NewReflector(
 		c.config.ListerWatcher,
 		c.config.ObjectType,
 		c.config.Queue,

@@ -110,9 +109,9 @@ func (c *Controller) HasSynced() bool {
 
 // Requeue adds the provided object back into the queue if it does not already exist.
 func (c *Controller) Requeue(obj interface{}) error {
-	return c.config.Queue.AddIfNotPresent(cache.Deltas{
-		cache.Delta{
-			Type:   cache.Sync,
+	return c.config.Queue.AddIfNotPresent(Deltas{
+		Delta{
+			Type:   Sync,
 			Object: obj,
 		},
 	})

@@ -124,7 +123,7 @@ func (c *Controller) Requeue(obj interface{}) error {
 // concurrently.
 func (c *Controller) processLoop() {
 	for {
-		obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process))
+		obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
 		if err != nil {
 			if c.config.RetryOnError {
 				// This is the safe way to re-enqueue.

@@ -145,7 +144,7 @@ func (c *Controller) processLoop() {
 //    get called even if nothing changed. This is useful for periodically
 //    evaluating or syncing something.
 //  * OnDelete will get the final state of the item if it is known, otherwise
-//    it will get an object of type cache.DeletedFinalStateUnknown. This can
+//    it will get an object of type DeletedFinalStateUnknown. This can
 //    happen if the watch is closed and misses the delete event and we don't
 //    notice the deletion until the subsequent re-list.
 type ResourceEventHandler interface {

@@ -185,18 +184,18 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
 }
 
 // DeletionHandlingMetaNamespaceKeyFunc checks for
-// cache.DeletedFinalStateUnknown objects before calling
-// cache.MetaNamespaceKeyFunc.
+// DeletedFinalStateUnknown objects before calling
+// MetaNamespaceKeyFunc.
 func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
-	if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+	if d, ok := obj.(DeletedFinalStateUnknown); ok {
 		return d.Key, nil
 	}
-	return cache.MetaNamespaceKeyFunc(obj)
+	return MetaNamespaceKeyFunc(obj)
 }
 
-// NewInformer returns a cache.Store and a controller for populating the store
+// NewInformer returns a Store and a controller for populating the store
 // while also providing event notifications. You should only used the returned
-// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
+// Store for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
 //
 // Parameters:

@@ -210,18 +209,18 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
 //  * h is the object you want notifications sent to.
 //
 func NewInformer(
-	lw cache.ListerWatcher,
+	lw ListerWatcher,
 	objType runtime.Object,
 	resyncPeriod time.Duration,
 	h ResourceEventHandler,
-) (cache.Store, *Controller) {
+) (Store, *Controller) {
 	// This will hold the client state, as we know it.
-	clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 
 	// This will hold incoming changes. Note how we pass clientState in as a
 	// KeyLister, that way resync operations will result in the correct set
 	// of update/delete deltas.
-	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
+	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
 
 	cfg := &Config{
 		Queue: fifo,

@@ -232,9 +231,9 @@ func NewInformer(
 
 		Process: func(obj interface{}) error {
 			// from oldest to newest
-			for _, d := range obj.(cache.Deltas) {
+			for _, d := range obj.(Deltas) {
 				switch d.Type {
-				case cache.Sync, cache.Added, cache.Updated:
+				case Sync, Added, Updated:
 					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
 						if err := clientState.Update(d.Object); err != nil {
 							return err

@@ -246,7 +245,7 @@ func NewInformer(
 					}
 						h.OnAdd(d.Object)
 					}
-				case cache.Deleted:
+				case Deleted:
 					if err := clientState.Delete(d.Object); err != nil {
 						return err
 					}

@@ -259,9 +258,9 @@ func NewInformer(
 	return clientState, New(cfg)
 }
 
-// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
+// NewIndexerInformer returns a Indexer and a controller for populating the index
 // while also providing event notifications. You should only used the returned
-// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event
+// Index for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
 //
 // Parameters:

@@ -275,19 +274,19 @@ func NewInformer(
 //  * h is the object you want notifications sent to.
 //
 func NewIndexerInformer(
-	lw cache.ListerWatcher,
+	lw ListerWatcher,
 	objType runtime.Object,
 	resyncPeriod time.Duration,
 	h ResourceEventHandler,
-	indexers cache.Indexers,
-) (cache.Indexer, *Controller) {
+	indexers Indexers,
+) (Indexer, *Controller) {
 	// This will hold the client state, as we know it.
-	clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
 
 	// This will hold incoming changes. Note how we pass clientState in as a
 	// KeyLister, that way resync operations will result in the correct set
 	// of update/delete deltas.
-	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
+	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
 
 	cfg := &Config{
 		Queue: fifo,

@@ -298,9 +297,9 @@ func NewIndexerInformer(
 
 		Process: func(obj interface{}) error {
 			// from oldest to newest
-			for _, d := range obj.(cache.Deltas) {
+			for _, d := range obj.(Deltas) {
 				switch d.Type {
-				case cache.Sync, cache.Added, cache.Updated:
+				case Sync, Added, Updated:
 					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
 						if err := clientState.Update(d.Object); err != nil {
 							return err

@@ -312,7 +311,7 @@ func NewIndexerInformer(
 					}
 					h.OnAdd(d.Object)
 				}
-				case cache.Deleted:
+				case Deleted:
 					if err := clientState.Delete(d.Object); err != nil {
 						return err
 					}
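
After the package move, the informer constructors are reached as cache.NewInformer instead of framework.NewInformer. A hedged usage sketch against the 1.4-era vendored API, following the call shapes visible at the call sites in this diff (client construction omitted; WatchFunc is assumed to mirror ListFunc):

    package main

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/cache"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        "k8s.io/kubernetes/pkg/runtime"
        "k8s.io/kubernetes/pkg/watch"
    )

    // buildPodInformer shows the post-move spelling: the informer helpers
    // live in the cache package alongside the stores they populate.
    func buildPodInformer(c clientset.Interface) (cache.Store, *cache.Controller) {
        return cache.NewInformer(
            &cache.ListWatch{
                ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                    return c.Core().Pods(api.NamespaceAll).List(options)
                },
                WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                    return c.Core().Pods(api.NamespaceAll).Watch(options)
                },
            },
            &api.Pod{},
            30*time.Second, // resync period
            cache.ResourceEventHandlerFuncs{
                AddFunc:    func(obj interface{}) { /* react to new pods */ },
                DeleteFunc: func(obj interface{}) { /* react to deletions */ },
            },
        )
    }

    func main() {
        // With a real clientset, the controller would be started with:
        //   store, ctrl := buildPodInformer(client)
        //   stop := make(chan struct{})
        //   go ctrl.Run(stop)
    }
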
@@ -14,14 +14,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package framework
+package cache
 
 import (
 	"fmt"
 	"sync"
 	"time"
 
-	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/runtime"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 )

@@ -39,7 +38,7 @@ type SharedInformer interface {
 	// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
 	// TODO we should try to remove this restriction eventually.
 	AddEventHandler(handler ResourceEventHandler) error
-	GetStore() cache.Store
+	GetStore() Store
 	// GetController gives back a synthetic interface that "votes" to start the informer
 	GetController() ControllerInterface
 	Run(stopCh <-chan struct{})

@@ -50,24 +49,24 @@ type SharedInformer interface {
 type SharedIndexInformer interface {
 	SharedInformer
 	// AddIndexers add indexers to the informer before it starts.
-	AddIndexers(indexers cache.Indexers) error
-	GetIndexer() cache.Indexer
+	AddIndexers(indexers Indexers) error
+	GetIndexer() Indexer
 }
 
 // NewSharedInformer creates a new instance for the listwatcher.
 // TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
 // be shared amongst all consumers.
-func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
-	return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{})
+func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
+	return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
 }
 
 // NewSharedIndexInformer creates a new instance for the listwatcher.
 // TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
 // be shared amongst all consumers.
-func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
+func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
 	sharedIndexInformer := &sharedIndexInformer{
 		processor:        &sharedProcessor{},
-		indexer:          cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
+		indexer:          NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
 		listerWatcher:    lw,
 		objectType:       objType,
 		fullResyncPeriod: resyncPeriod,

@@ -76,13 +75,13 @@ func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resy
 }
 
 type sharedIndexInformer struct {
-	indexer    cache.Indexer
+	indexer    Indexer
 	controller *Controller
 
 	processor *sharedProcessor
 
 	// This block is tracked to handle late initialization of the controller
-	listerWatcher    cache.ListerWatcher
+	listerWatcher    ListerWatcher
 	objectType       runtime.Object
 	fullResyncPeriod time.Duration
 

@@ -129,7 +128,7 @@ type deleteNotification struct {
 func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 
-	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)
+	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)
 
 	cfg := &Config{
 		Queue: fifo,

@@ -180,15 +179,15 @@ func (s *sharedIndexInformer) LastSyncResourceVersion() string {
 	return s.controller.reflector.LastSyncResourceVersion()
 }
 
-func (s *sharedIndexInformer) GetStore() cache.Store {
+func (s *sharedIndexInformer) GetStore() Store {
 	return s.indexer
 }
 
-func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
+func (s *sharedIndexInformer) GetIndexer() Indexer {
 	return s.indexer
 }
 
-func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
 	s.startedLock.Lock()
 	defer s.startedLock.Unlock()
 

@@ -240,9 +239,9 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
 	defer s.blockDeltas.Unlock()
 
 	// from oldest to newest
-	for _, d := range obj.(cache.Deltas) {
+	for _, d := range obj.(Deltas) {
 		switch d.Type {
-		case cache.Sync, cache.Added, cache.Updated:
+		case Sync, Added, Updated:
 			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
 				if err := s.indexer.Update(d.Object); err != nil {
 					return err

@@ -254,7 +253,7 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
 				}
 				s.processor.distribute(addNotification{newObj: d.Object})
 			}
-		case cache.Deleted:
+		case Deleted:
 			if err := s.indexer.Delete(d.Object); err != nil {
 				return err
 			}
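
A consumer of the SharedIndexInformer interface above attaches handlers before Run and reads through the shared store. A hedged sketch using only the methods named in the interface (informer construction elided):

    package main

    import (
        "k8s.io/kubernetes/pkg/client/cache"
    )

    // consume is a hypothetical consumer of the SharedIndexInformer
    // interface above: one shared watch feeds many handlers, and reads
    // go through the shared store.
    func consume(informer cache.SharedIndexInformer, stopCh <-chan struct{}) error {
        // Handlers must be attached before Run, per the restriction noted
        // in the interface comment; afterwards an error is returned.
        if err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc:    func(obj interface{}) { /* handle add */ },
            DeleteFunc: func(obj interface{}) { /* handle delete */ },
        }); err != nil {
            return err
        }
        go informer.Run(stopCh)
        // Get/List only; mutating the store would corrupt notifications,
        // as the NewInformer comment warns.
        _ = informer.GetStore().List()
        return nil
    }

    func main() {}
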
@@ -29,7 +29,6 @@ import (
 	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
 	utilcertificates "k8s.io/kubernetes/pkg/util/certificates"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"

@@ -47,7 +46,7 @@ type CertificateController struct {
 	kubeClient clientset.Interface
 
 	// CSR framework and store
-	csrController *framework.Controller
+	csrController *cache.Controller
 	csrStore      cache.StoreToCertificateRequestLister
 
 	// To allow injection of updateCertificateRequestStatus for testing.

@@ -85,7 +84,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
 	}
 
 	// Manage the addition/update of certificate requests
-	cc.csrStore.Store, cc.csrController = framework.NewInformer(
+	cc.csrStore.Store, cc.csrController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)

@@ -96,7 +95,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
 		},
 		&certificates.CertificateSigningRequest{},
 		syncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				csr := obj.(*certificates.CertificateSigningRequest)
 				glog.V(4).Infof("Adding certificate request %s", csr.Name)
@@ -31,7 +31,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/clock"

@@ -54,7 +53,7 @@ const (
 )
 
 var (
-	KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
+	KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
 )
 
 type ResyncPeriodFunc func() time.Duration

@@ -220,6 +219,8 @@ type Expectations interface {
 
 // ControlleeExpectations track controllee creates/deletes.
 type ControlleeExpectations struct {
+	// Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
+	// See: https://golang.org/pkg/sync/atomic/ for more information
 	add int64
 	del int64
 	key string
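
The added comment documents a real Go constraint: the int64 arguments to sync/atomic must be 64-bit aligned, and on 32-bit platforms Go only guarantees that for the first word of an allocated struct, so atomically-updated int64 fields go first. A small runnable illustration:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // counters follows the rule from the comment above: the int64 fields
    // touched by sync/atomic come first, so they are 64-bit aligned even
    // on 32-bit platforms (where only the first word of an allocated
    // struct is guaranteed 8-byte alignment).
    type counters struct {
        add int64 // must stay at the top
        del int64
        key string
    }

    func main() {
        c := &counters{key: "demo"}
        atomic.AddInt64(&c.add, 2)
        atomic.AddInt64(&c.del, 1)
        fmt.Println(atomic.LoadInt64(&c.add), atomic.LoadInt64(&c.del)) // 2 1
    }
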
@@ -23,8 +23,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
-
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"

@@ -34,8 +32,7 @@ import (
 	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/metrics"

@@ -45,6 +42,8 @@ import (
 	"k8s.io/kubernetes/pkg/watch"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
+
+	"github.com/golang/glog"
 )
 
 const (

@@ -75,7 +74,7 @@ type DaemonSetsController struct {
 	// we have a personal informer, we must start it ourselves. If you start
 	// the controller using NewDaemonSetsController(passing SharedInformer), this
 	// will be null
-	internalPodInformer framework.SharedInformer
+	internalPodInformer cache.SharedInformer
 
 	// An dsc is temporarily suspended after creating/deleting these many replicas.
 	// It resumes normal action after observing the watch events for them.

@@ -92,11 +91,11 @@ type DaemonSetsController struct {
 	// A store of nodes
 	nodeStore cache.StoreToNodeLister
 	// Watches changes to all daemon sets.
-	dsController *framework.Controller
+	dsController *cache.Controller
 	// Watches changes to all pods
-	podController framework.ControllerInterface
+	podController cache.ControllerInterface
 	// Watches changes to all nodes.
-	nodeController *framework.Controller
+	nodeController *cache.Controller
 	// podStoreSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	podStoreSynced func() bool

@@ -107,7 +106,7 @@ type DaemonSetsController struct {
 	queue *workqueue.Type
 }
 
-func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
+func NewDaemonSetsController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.

@@ -128,7 +127,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
 		queue: workqueue.NewNamed("daemonset"),
 	}
 	// Manage addition/update of daemon sets.
-	dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
+	dsc.dsStore.Store, dsc.dsController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)

@@ -140,7 +139,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
 		&extensions.DaemonSet{},
 		// TODO: Can we have much longer period here?
 		FullDaemonSetResyncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				ds := obj.(*extensions.DaemonSet)
 				glog.V(4).Infof("Adding daemon set %s", ds.Name)

@@ -173,7 +172,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
 
 	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
 	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
-	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    dsc.addPod,
 		UpdateFunc: dsc.updatePod,
 		DeleteFunc: dsc.deletePod,

@@ -183,7 +182,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
 	dsc.podStoreSynced = podInformer.HasSynced
 
 	// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change,
-	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
+	dsc.nodeStore.Store, dsc.nodeController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dsc.kubeClient.Core().Nodes().List(options)

@@ -194,7 +193,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
 		},
 		&api.Node{},
 		resyncPeriod(),
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    dsc.addNode,
 			UpdateFunc: dsc.updateNode,
 		},
@ -36,7 +36,6 @@ import (
|
||||||
"k8s.io/kubernetes/pkg/client/record"
|
"k8s.io/kubernetes/pkg/client/record"
|
||||||
"k8s.io/kubernetes/pkg/controller"
|
"k8s.io/kubernetes/pkg/controller"
|
||||||
"k8s.io/kubernetes/pkg/controller/deployment/util"
|
"k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||||
"k8s.io/kubernetes/pkg/controller/framework"
|
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
"k8s.io/kubernetes/pkg/runtime"
|
"k8s.io/kubernetes/pkg/runtime"
|
||||||
"k8s.io/kubernetes/pkg/util/metrics"
|
"k8s.io/kubernetes/pkg/util/metrics"
|
||||||
|
@@ -70,15 +69,15 @@ type DeploymentController struct {
 	// A store of deployments, populated by the dController
 	dStore cache.StoreToDeploymentLister
 	// Watches changes to all deployments
-	dController *framework.Controller
+	dController *cache.Controller
 	// A store of ReplicaSets, populated by the rsController
 	rsStore cache.StoreToReplicaSetLister
 	// Watches changes to all ReplicaSets
-	rsController *framework.Controller
+	rsController *cache.Controller
 	// A store of pods, populated by the podController
 	podStore cache.StoreToPodLister
 	// Watches changes to all pods
-	podController *framework.Controller
+	podController *cache.Controller

 	// dStoreSynced returns true if the Deployment store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
@@ -110,7 +109,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
 	}

-	dc.dStore.Indexer, dc.dController = framework.NewIndexerInformer(
+	dc.dStore.Indexer, dc.dController = cache.NewIndexerInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.client.Extensions().Deployments(api.NamespaceAll).List(options)
@@ -121,7 +120,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 			},
 		},
 		&extensions.Deployment{},
 		FullDeploymentResyncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    dc.addDeploymentNotification,
 			UpdateFunc: dc.updateDeploymentNotification,
 			// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
@@ -130,7 +129,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)

-	dc.rsStore.Store, dc.rsController = framework.NewInformer(
+	dc.rsStore.Store, dc.rsController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.client.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@@ -141,14 +140,14 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 			},
 		},
 		&extensions.ReplicaSet{},
 		resyncPeriod(),
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    dc.addReplicaSet,
 			UpdateFunc: dc.updateReplicaSet,
 			DeleteFunc: dc.deleteReplicaSet,
 		},
 	)

-	dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer(
+	dc.podStore.Indexer, dc.podController = cache.NewIndexerInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.client.Core().Pods(api.NamespaceAll).List(options)
@@ -159,7 +158,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 			},
 		},
 		&api.Pod{},
 		resyncPeriod(),
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    dc.addPod,
 			UpdateFunc: dc.updatePod,
 			DeleteFunc: dc.deletePod,
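The hunks above are representative of the whole upgrade: the informer machinery moved from pkg/controller/framework into pkg/client/cache, with call signatures left intact. A minimal sketch of the post-upgrade construction pattern, assuming the 1.4.x internal API shown in the hunks; buildDeploymentInformer is a hypothetical helper (not part of this commit) and the handler bodies are placeholders:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// buildDeploymentInformer mirrors the pattern the controllers above now use:
// cache.NewIndexerInformer is a drop-in replacement for the old
// framework.NewIndexerInformer.
func buildDeploymentInformer(client clientset.Interface) (cache.Indexer, *cache.Controller) {
	return cache.NewIndexerInformer(
		// ListWatch already lived in pkg/client/cache; only the
		// informer/controller types moved in this upgrade.
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Extensions().Deployments(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Extensions().Deployments(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.Deployment{},
		30*time.Second, // stand-in for FullDeploymentResyncPeriod
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* enqueue */ },
			UpdateFunc: func(old, cur interface{}) { /* enqueue */ },
			DeleteFunc: func(obj interface{}) { /* enqueue */ },
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}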
@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/intstr"
@@ -47,22 +46,22 @@ type DisruptionController struct {
 	kubeClient *client.Client

 	pdbStore      cache.Store
-	pdbController *framework.Controller
+	pdbController *cache.Controller
 	pdbLister     cache.StoreToPodDisruptionBudgetLister

-	podController framework.ControllerInterface
+	podController cache.ControllerInterface
 	podLister     cache.StoreToPodLister

 	rcIndexer    cache.Indexer
-	rcController *framework.Controller
+	rcController *cache.Controller
 	rcLister     cache.StoreToReplicationControllerLister

 	rsStore      cache.Store
-	rsController *framework.Controller
+	rsController *cache.Controller
 	rsLister     cache.StoreToReplicaSetLister

 	dIndexer    cache.Indexer
-	dController *framework.Controller
+	dController *cache.Controller
 	dLister     cache.StoreToDeploymentLister

 	queue *workqueue.Type
@@ -84,7 +83,7 @@ type controllerAndScale struct {
 // controllers and their scale.
 type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)

-func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
+func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
 	dc := &DisruptionController{
 		kubeClient:    kubeClient,
 		podController: podInformer.GetController(),
@@ -97,13 +96,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie

 	dc.podLister.Indexer = podInformer.GetIndexer()

-	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    dc.addPod,
 		UpdateFunc: dc.updatePod,
 		DeleteFunc: dc.deletePod,
 	})

-	dc.pdbStore, dc.pdbController = framework.NewInformer(
+	dc.pdbStore, dc.pdbController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)
@@ -114,7 +113,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
 		},
 		&policy.PodDisruptionBudget{},
 		30*time.Second,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    dc.addDb,
 			UpdateFunc: dc.updateDb,
 			DeleteFunc: dc.removeDb,
@@ -122,7 +121,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
 	)
 	dc.pdbLister.Store = dc.pdbStore

-	dc.rcIndexer, dc.rcController = framework.NewIndexerInformer(
+	dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
@@ -133,13 +132,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
 		},
 		&api.ReplicationController{},
 		30*time.Second,
-		framework.ResourceEventHandlerFuncs{},
+		cache.ResourceEventHandlerFuncs{},
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)

 	dc.rcLister.Indexer = dc.rcIndexer

-	dc.rsStore, dc.rsController = framework.NewInformer(
+	dc.rsStore, dc.rsController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@@ -150,12 +149,12 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
 		},
 		&extensions.ReplicaSet{},
 		30*time.Second,
-		framework.ResourceEventHandlerFuncs{},
+		cache.ResourceEventHandlerFuncs{},
 	)

 	dc.rsLister.Store = dc.rsStore

-	dc.dIndexer, dc.dController = framework.NewIndexerInformer(
+	dc.dIndexer, dc.dController = cache.NewIndexerInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)
@@ -166,7 +165,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
 		},
 		&extensions.Deployment{},
 		30*time.Second,
-		framework.ResourceEventHandlerFuncs{},
+		cache.ResourceEventHandlerFuncs{},
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)

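The DisruptionController consumes an already-constructed shared pod informer instead of building its own. A sketch of that side of the API, under the same imports and assumptions as the sketch above (wirePodInformer is a hypothetical helper, the handler bodies are placeholders):

// wirePodInformer registers handlers on an existing shared informer and
// hands back the shared indexer/controller, as the constructor above does.
func wirePodInformer(podInformer cache.SharedIndexInformer) (cache.Indexer, cache.ControllerInterface) {
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* react to pod add */ },
		UpdateFunc: func(old, cur interface{}) { /* react to pod update */ },
		DeleteFunc: func(obj interface{}) { /* react to pod delete */ },
	})
	// Every consumer of this informer shares the same backing cache and
	// watch connection; nobody re-lists the pods separately.
	return podInformer.GetIndexer(), podInformer.GetController()
}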
@@ -34,8 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/metrics"
@@ -66,11 +65,11 @@ const (
 )

 var (
-	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
+	keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
 )

 // NewEndpointController returns a new *EndpointController.
-func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
+func NewEndpointController(podInformer cache.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
 	if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
 	}
@@ -79,7 +78,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
 		queue: workqueue.NewNamed("endpoint"),
 	}

-	e.serviceStore.Store, e.serviceController = framework.NewInformer(
+	e.serviceStore.Store, e.serviceController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return e.client.Core().Services(api.NamespaceAll).List(options)
@@ -91,7 +90,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
 		&api.Service{},
 		// TODO: Can we have much longer period here?
 		FullServiceResyncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc: e.enqueueService,
 			UpdateFunc: func(old, cur interface{}) {
 				e.enqueueService(cur)
@@ -100,7 +99,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
 		},
 	)

-	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    e.addPod,
 		UpdateFunc: e.updatePod,
 		DeleteFunc: e.deletePod,
@@ -133,7 +132,7 @@ type EndpointController struct {
 	// we have a personal informer, we must start it ourselves. If you start
 	// the controller using NewEndpointController(passing SharedInformer), this
 	// will be null
-	internalPodInformer framework.SharedIndexInformer
+	internalPodInformer cache.SharedIndexInformer

 	// Services that need to be updated. A channel is inappropriate here,
 	// because it allows services with lots of pods to be serviced much
@@ -144,8 +143,8 @@ type EndpointController struct {

 	// Since we join two objects, we'll watch both of them with
 	// controllers.
-	serviceController *framework.Controller
-	podController     framework.ControllerInterface
+	serviceController *cache.Controller
+	podController     cache.ControllerInterface
 	// podStoreSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	podStoreSynced func() bool
@@ -1,18 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package framework implements all the grunt work involved in running a simple controller.
-package framework
@@ -1,262 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package framework
-
-import (
-	"errors"
-	"math/rand"
-	"strconv"
-	"sync"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/meta"
-	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/watch"
-)
-
-func NewFakeControllerSource() *FakeControllerSource {
-	return &FakeControllerSource{
-		Items:       map[nnu]runtime.Object{},
-		Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
-	}
-}
-
-func NewFakePVControllerSource() *FakePVControllerSource {
-	return &FakePVControllerSource{
-		FakeControllerSource{
-			Items:       map[nnu]runtime.Object{},
-			Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
-		}}
-}
-
-func NewFakePVCControllerSource() *FakePVCControllerSource {
-	return &FakePVCControllerSource{
-		FakeControllerSource{
-			Items:       map[nnu]runtime.Object{},
-			Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
-		}}
-}
-
-// FakeControllerSource implements listing/watching for testing.
-type FakeControllerSource struct {
-	lock        sync.RWMutex
-	Items       map[nnu]runtime.Object
-	changes     []watch.Event // one change per resourceVersion
-	Broadcaster *watch.Broadcaster
-}
-
-type FakePVControllerSource struct {
-	FakeControllerSource
-}
-
-type FakePVCControllerSource struct {
-	FakeControllerSource
-}
-
-// namespace, name, uid to be used as a key.
-type nnu struct {
-	namespace, name string
-	uid             types.UID
-}
-
-// Add adds an object to the set and sends an add event to watchers.
-// obj's ResourceVersion is set.
-func (f *FakeControllerSource) Add(obj runtime.Object) {
-	f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
-}
-
-// Modify updates an object in the set and sends a modified event to watchers.
-// obj's ResourceVersion is set.
-func (f *FakeControllerSource) Modify(obj runtime.Object) {
-	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
-}
-
-// Delete deletes an object from the set and sends a delete event to watchers.
-// obj's ResourceVersion is set.
-func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
-	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
-}
-
-// AddDropWatch adds an object to the set but forgets to send an add event to
-// watchers.
-// obj's ResourceVersion is set.
-func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
-	f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
-}
-
-// ModifyDropWatch updates an object in the set but forgets to send a modify
-// event to watchers.
-// obj's ResourceVersion is set.
-func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
-	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
-}
-
-// DeleteDropWatch deletes an object from the set but forgets to send a delete
-// event to watchers.
-// obj's ResourceVersion is set.
-func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
-	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
-}
-
-func (f *FakeControllerSource) key(accessor meta.Object) nnu {
-	return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
-}
-
-// Change records the given event (setting the object's resource version) and
-// sends a watch event with the specified probability.
-func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
-	f.lock.Lock()
-	defer f.lock.Unlock()
-
-	accessor, err := meta.Accessor(e.Object)
-	if err != nil {
-		panic(err) // this is test code only
-	}
-
-	resourceVersion := len(f.changes) + 1
-	accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
-	f.changes = append(f.changes, e)
-	key := f.key(accessor)
-	switch e.Type {
-	case watch.Added, watch.Modified:
-		f.Items[key] = e.Object
-	case watch.Deleted:
-		delete(f.Items, key)
-	}
-
-	if rand.Float64() < watchProbability {
-		f.Broadcaster.Action(e.Type, e.Object)
-	}
-}
-
-func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
-	list := make([]runtime.Object, 0, len(f.Items))
-	for _, obj := range f.Items {
-		// Must make a copy to allow clients to modify the object.
-		// Otherwise, if they make a change and write it back, they
-		// will inadvertently change our canonical copy (in
-		// addition to racing with other clients).
-		objCopy, err := api.Scheme.DeepCopy(obj)
-		if err != nil {
-			return nil, err
-		}
-		list = append(list, objCopy.(runtime.Object))
-	}
-	return list, nil
-}
-
-// List returns a list object, with its resource version set.
-func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-	list, err := f.getListItemsLocked()
-	if err != nil {
-		return nil, err
-	}
-	listObj := &api.List{}
-	if err := meta.SetList(listObj, list); err != nil {
-		return nil, err
-	}
-	objMeta, err := api.ListMetaFor(listObj)
-	if err != nil {
-		return nil, err
-	}
-	resourceVersion := len(f.changes)
-	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
-	return listObj, nil
-}
-
-// List returns a list object, with its resource version set.
-func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-	list, err := f.FakeControllerSource.getListItemsLocked()
-	if err != nil {
-		return nil, err
-	}
-	listObj := &api.PersistentVolumeList{}
-	if err := meta.SetList(listObj, list); err != nil {
-		return nil, err
-	}
-	objMeta, err := api.ListMetaFor(listObj)
-	if err != nil {
-		return nil, err
-	}
-	resourceVersion := len(f.changes)
-	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
-	return listObj, nil
-}
-
-// List returns a list object, with its resource version set.
-func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-	list, err := f.FakeControllerSource.getListItemsLocked()
-	if err != nil {
-		return nil, err
-	}
-	listObj := &api.PersistentVolumeClaimList{}
-	if err := meta.SetList(listObj, list); err != nil {
-		return nil, err
-	}
-	objMeta, err := api.ListMetaFor(listObj)
-	if err != nil {
-		return nil, err
-	}
-	resourceVersion := len(f.changes)
-	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
-	return listObj, nil
-}
-
-// Watch returns a watch, which will be pre-populated with all changes
-// after resourceVersion.
-func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-	rc, err := strconv.Atoi(options.ResourceVersion)
-	if err != nil {
-		return nil, err
-	}
-	if rc < len(f.changes) {
-		changes := []watch.Event{}
-		for _, c := range f.changes[rc:] {
-			// Must make a copy to allow clients to modify the
-			// object. Otherwise, if they make a change and write
-			// it back, they will inadvertently change our
-			// canonical copy (in addition to racing with other
-			// clients).
-			objCopy, err := api.Scheme.DeepCopy(c.Object)
-			if err != nil {
-				return nil, err
-			}
-			changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
-		}
-		return f.Broadcaster.WatchWithPrefix(changes), nil
-	} else if rc > len(f.changes) {
-		return nil, errors.New("resource version in the future not supported by this fake")
-	}
-	return f.Broadcaster.Watch(), nil
-}
-
-// Shutdown closes the underlying broadcaster, waiting for events to be
-// delivered. It's an error to call any method after calling shutdown. This is
-// enforced by Shutdown() leaving f locked.
-func (f *FakeControllerSource) Shutdown() {
-	f.lock.Lock() // Purposely no unlock.
-	f.Broadcaster.Shutdown()
-}
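The two deletions above drop the framework package's doc file and its fake watch source wholesale; presumably the fake was re-homed along with the rest of the package, though the destination path is not visible in this diff. Its test-side usage is unchanged either way. A sketch, under the same imports as the first sketch, with driveFakeSource as a hypothetical test helper:

// driveFakeSource exercises the fake: Add/Modify/Delete advance the
// resourceVersion and normally broadcast a watch event, while the
// *DropWatch variants record the change but skip the event, simulating
// a missed notification.
func driveFakeSource() {
	source := NewFakeControllerSource()
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "ns"}}

	source.Add(pod)          // resourceVersion 1, Added event delivered
	source.Modify(pod)       // resourceVersion 2, Modified event delivered
	source.ModifyDropWatch(pod) // resourceVersion 3, no event: watchers must resync
	source.Delete(pod)       // resourceVersion 4, Deleted event delivered

	source.Shutdown() // leaves the source locked; no calls allowed afterwards
}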
@@ -32,7 +32,6 @@ import (
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/types"
@@ -49,7 +48,7 @@ const ResourceResyncTime time.Duration = 0

 type monitor struct {
 	store      cache.Store
-	controller *framework.Controller
+	controller *cache.Controller
 }

 type objectReference struct {
@@ -488,11 +487,11 @@ func (gc *GarbageCollector) monitorFor(resource unversioned.GroupVersionResource
 		}
 		runtimeObject.GetObjectKind().SetGroupVersionKind(kind)
 	}
-	monitor.store, monitor.controller = framework.NewInformer(
+	monitor.store, monitor.controller = cache.NewInformer(
 		gcListWatcher(client, resource),
 		nil,
 		ResourceResyncTime,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			// add the event to the propagator's eventQueue.
 			AddFunc: func(obj interface{}) {
 				setObjectTypeMeta(obj)
@@ -21,13 +21,12 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/controller/framework"
 )

 // PodInformer is type of SharedIndexInformer which watches and lists all pods.
 // Interface provides constructor for informer and lister for pods
 type PodInformer interface {
-	Informer() framework.SharedIndexInformer
+	Informer() cache.SharedIndexInformer
 	Lister() *cache.StoreToPodLister
 }

@@ -37,7 +36,7 @@ type podInformer struct {

 // Informer checks whether podInformer exists in sharedInformerFactory and if not, it creates new informer of type
 // podInformer and connects it to sharedInformerFactory
-func (f *podInformer) Informer() framework.SharedIndexInformer {
+func (f *podInformer) Informer() cache.SharedIndexInformer {
 	f.lock.Lock()
 	defer f.lock.Unlock()

@@ -63,7 +62,7 @@ func (f *podInformer) Lister() *cache.StoreToPodLister {
 // NamespaceInformer is type of SharedIndexInformer which watches and lists all namespaces.
 // Interface provides constructor for informer and lister for namsespaces
 type NamespaceInformer interface {
-	Informer() framework.SharedIndexInformer
+	Informer() cache.SharedIndexInformer
 	Lister() *cache.IndexerToNamespaceLister
 }

@@ -73,7 +72,7 @@ type namespaceInformer struct {

 // Informer checks whether namespaceInformer exists in sharedInformerFactory and if not, it creates new informer of type
 // namespaceInformer and connects it to sharedInformerFactory
-func (f *namespaceInformer) Informer() framework.SharedIndexInformer {
+func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
 	f.lock.Lock()
 	defer f.lock.Unlock()

@@ -99,7 +98,7 @@ func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {
 // NodeInformer is type of SharedIndexInformer which watches and lists all nodes.
 // Interface provides constructor for informer and lister for nodes
 type NodeInformer interface {
-	Informer() framework.SharedIndexInformer
+	Informer() cache.SharedIndexInformer
 	Lister() *cache.StoreToNodeLister
 }

@@ -109,7 +108,7 @@ type nodeInformer struct {

 // Informer checks whether nodeInformer exists in sharedInformerFactory and if not, it creates new informer of type
 // nodeInformer and connects it to sharedInformerFactory
-func (f *nodeInformer) Informer() framework.SharedIndexInformer {
+func (f *nodeInformer) Informer() cache.SharedIndexInformer {
 	f.lock.Lock()
 	defer f.lock.Unlock()

@@ -135,7 +134,7 @@ func (f *nodeInformer) Lister() *cache.StoreToNodeLister {
 // PVCInformer is type of SharedIndexInformer which watches and lists all persistent volume claims.
 // Interface provides constructor for informer and lister for persistent volume claims
 type PVCInformer interface {
-	Informer() framework.SharedIndexInformer
+	Informer() cache.SharedIndexInformer
 	Lister() *cache.StoreToPVCFetcher
 }

@@ -145,7 +144,7 @@ type pvcInformer struct {

 // Informer checks whether pvcInformer exists in sharedInformerFactory and if not, it creates new informer of type
 // pvcInformer and connects it to sharedInformerFactory
-func (f *pvcInformer) Informer() framework.SharedIndexInformer {
+func (f *pvcInformer) Informer() cache.SharedIndexInformer {
 	f.lock.Lock()
 	defer f.lock.Unlock()

@@ -171,7 +170,7 @@ func (f *pvcInformer) Lister() *cache.StoreToPVCFetcher {
 // PVInformer is type of SharedIndexInformer which watches and lists all persistent volumes.
 // Interface provides constructor for informer and lister for persistent volumes
 type PVInformer interface {
-	Informer() framework.SharedIndexInformer
+	Informer() cache.SharedIndexInformer
 	Lister() *cache.StoreToPVFetcher
 }

@@ -181,7 +180,7 @@ type pvInformer struct {

 // Informer checks whether pvInformer exists in sharedInformerFactory and if not, it creates new informer of type
 // pvInformer and connects it to sharedInformerFactory
-func (f *pvInformer) Informer() framework.SharedIndexInformer {
+func (f *pvInformer) Informer() cache.SharedIndexInformer {
 	f.lock.Lock()
 	defer f.lock.Unlock()

@@ -24,7 +24,6 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/watch"
 )
@@ -47,7 +46,7 @@ type sharedInformerFactory struct {
 	lock          sync.Mutex
 	defaultResync time.Duration

-	informers map[reflect.Type]framework.SharedIndexInformer
+	informers map[reflect.Type]cache.SharedIndexInformer
 	// startedInformers is used for tracking which informers have been started
 	// this allows calling of Start method multiple times
 	startedInformers map[reflect.Type]bool
@@ -58,7 +57,7 @@ func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Dur
 	return &sharedInformerFactory{
 		client:           client,
 		defaultResync:    defaultResync,
-		informers:        make(map[reflect.Type]framework.SharedIndexInformer),
+		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
 		startedInformers: make(map[reflect.Type]bool),
 	}
 }
@@ -102,8 +101,8 @@ func (f *sharedInformerFactory) PersistentVolumes() PVInformer {
 }

 // NewPodInformer returns a SharedIndexInformer that lists and watches all pods
-func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
-	sharedIndexInformer := framework.NewSharedIndexInformer(
+func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return client.Core().Pods(api.NamespaceAll).List(options)
@@ -121,8 +120,8 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) fram
 }

 // NewNodeInformer returns a SharedIndexInformer that lists and watches all nodes
-func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
-	sharedIndexInformer := framework.NewSharedIndexInformer(
+func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return client.Core().Nodes().List(options)
@@ -139,8 +138,8 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) fra
 }

 // NewPVCInformer returns a SharedIndexInformer that lists and watches all PVCs
-func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
-	sharedIndexInformer := framework.NewSharedIndexInformer(
+func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
@@ -157,8 +156,8 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) fram
 }

 // NewPVInformer returns a SharedIndexInformer that lists and watches all PVs
-func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
-	sharedIndexInformer := framework.NewSharedIndexInformer(
+func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return client.Core().PersistentVolumes().List(options)
@@ -175,8 +174,8 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) frame
 }

 // NewNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces
-func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
-	sharedIndexInformer := framework.NewSharedIndexInformer(
+func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return client.Core().Namespaces().List(options)
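The factory keeps one shared informer per resource type and, per the startedInformers comment above, tolerates repeated Start calls. A sketch of the intended consumption, under the same imports as the first sketch, assuming a Pods() accessor and a Start(stopCh) method on the factory (both implied by the surrounding code but not fully visible in these hunks):

// useFactory asks for the typed informers it needs, attaches handlers,
// then starts everything once; startedInformers makes Start idempotent.
func useFactory(client clientset.Interface, stopCh <-chan struct{}) {
	factory := NewSharedInformerFactory(client, 30*time.Second)

	podInformer := factory.Pods().Informer() // lazily created and registered
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* handle pod add */ },
	})

	factory.Start(stopCh) // starts only the informers not already running
}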
@@ -31,8 +31,7 @@ import (
 	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/metrics"
@@ -51,7 +50,7 @@ type JobController struct {
 	// we have a personal informer, we must start it ourselves. If you start
 	// the controller using NewJobController(passing SharedInformer), this
 	// will be null
-	internalPodInformer framework.SharedInformer
+	internalPodInformer cache.SharedInformer

 	// To allow injection of updateJobStatus for testing.
 	updateHandler func(job *batch.Job) error
@@ -66,7 +65,7 @@ type JobController struct {
 	// A store of job, populated by the jobController
 	jobStore cache.StoreToJobLister
 	// Watches changes to all jobs
-	jobController *framework.Controller
+	jobController *cache.Controller

 	// A store of pods, populated by the podController
 	podStore cache.StoreToPodLister
@@ -77,7 +76,7 @@ type JobController struct {
 	recorder record.EventRecorder
 }

-func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
+func NewJobController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -98,7 +97,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
 		recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
 	}

-	jm.jobStore.Store, jm.jobController = framework.NewInformer(
+	jm.jobStore.Store, jm.jobController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
@@ -110,7 +109,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
 		&batch.Job{},
 		// TODO: Can we have much longer period here?
 		replicationcontroller.FullControllerResyncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc: jm.enqueueController,
 			UpdateFunc: func(old, cur interface{}) {
 				if job := cur.(*batch.Job); !IsJobFinished(job) {
@@ -121,7 +120,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
 		},
 	)

-	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    jm.addPod,
 		UpdateFunc: jm.updatePod,
 		DeleteFunc: jm.deletePod,
@@ -25,7 +25,6 @@ import (
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/metrics"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -45,7 +44,7 @@ type NamespaceController struct {
 	// store that holds the namespaces
 	store cache.Store
 	// controller that observes the namespaces
-	controller *framework.Controller
+	controller *cache.Controller
 	// namespaces that have been queued up for processing by workers
 	queue workqueue.RateLimitingInterface
 	// list of preferred group versions and their corresponding resource set for namespace deletion
@@ -95,7 +94,7 @@ func NewNamespaceController(
 	}

 	// configure the backing store/controller
-	store, controller := framework.NewInformer(
+	store, controller := cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return kubeClient.Core().Namespaces().List(options)
@@ -106,7 +105,7 @@ func NewNamespaceController(
 		},
 		&api.Namespace{},
 		resyncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				namespace := obj.(*api.Namespace)
 				namespaceController.enqueueNamespace(namespace)
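Construction only wires the store and controller together; nothing lists or watches until the controller's Run loop starts. A sketch of that run side, assuming cache.Controller kept the framework package's Run and HasSynced methods (runNamespaceSync is a hypothetical helper, imports as in the first sketch):

// runNamespaceSync starts the controller, waits for the initial list to
// land in the store, then reads from the local cache instead of the API.
func runNamespaceSync(controller *cache.Controller, store cache.Store) {
	stopCh := make(chan struct{})
	defer close(stopCh)

	go controller.Run(stopCh)

	// HasSynced reports whether the first full list has been processed.
	for !controller.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
	_ = store.ListKeys() // namespace keys now served from the cache
}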
@@ -33,8 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -136,13 +135,13 @@ type NodeController struct {
 	maximumGracePeriod time.Duration
 	recorder           record.EventRecorder
 	// Pod framework and store
-	podController framework.ControllerInterface
+	podController cache.ControllerInterface
 	podStore      cache.StoreToPodLister
 	// Node framework and store
-	nodeController *framework.Controller
+	nodeController *cache.Controller
 	nodeStore      cache.StoreToNodeLister
 	// DaemonSet framework and store
-	daemonSetController *framework.Controller
+	daemonSetController *cache.Controller
 	daemonSetStore      cache.StoreToDaemonSetLister
 	// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
 	cidrAllocator CIDRAllocator
@@ -164,7 +163,7 @@ type NodeController struct {
 	// we have a personal informer, we must start it ourselves. If you start
 	// the controller using NewDaemonSetsController(passing SharedInformer), this
 	// will be null
-	internalPodInformer framework.SharedIndexInformer
+	internalPodInformer cache.SharedIndexInformer
 }

 // NewNodeController returns a new node controller to sync instances from cloudprovider.
@@ -172,7 +171,7 @@ type NodeController struct {
 // podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
 // currently, this should be handled as a fatal error.
 func NewNodeController(
-	podInformer framework.SharedIndexInformer,
+	podInformer cache.SharedIndexInformer,
 	cloud cloudprovider.Interface,
 	kubeClient clientset.Interface,
 	podEvictionTimeout time.Duration,
@@ -241,16 +240,16 @@ func NewNodeController(
 	nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
 	nc.computeZoneStateFunc = nc.ComputeZoneState

-	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    nc.maybeDeleteTerminatingPod,
 		UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
 	})
 	nc.podStore.Indexer = podInformer.GetIndexer()
 	nc.podController = podInformer.GetController()

-	nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{}
+	nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
 	if nc.allocateNodeCIDRs {
-		nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{
+		nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				node := obj.(*api.Node)
 				err := nc.cidrAllocator.AllocateOrOccupyCIDR(node)
@@ -296,7 +295,7 @@ func NewNodeController(
 		}
 	}

-	nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
+	nc.nodeStore.Store, nc.nodeController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return nc.kubeClient.Core().Nodes().List(options)
@@ -310,7 +309,7 @@ func NewNodeController(
 		nodeEventHandlerFuncs,
 	)

-	nc.daemonSetStore.Store, nc.daemonSetController = framework.NewInformer(
+	nc.daemonSetStore.Store, nc.daemonSetController = cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 				return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
@@ -321,7 +320,7 @@ func NewNodeController(
 		},
 		&extensions.DaemonSet{},
 		controller.NoResyncPeriodFunc(),
-		framework.ResourceEventHandlerFuncs{},
+		cache.ResourceEventHandlerFuncs{},
 	)

 	if allocateNodeCIDRs {

@@ -29,7 +29,6 @@ import (
     "k8s.io/kubernetes/pkg/client/record"
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/errors"
     utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -63,12 +62,12 @@ type PetSetController struct {
     // podStoreSynced returns true if the pod store has synced at least once.
     podStoreSynced func() bool
     // Watches changes to all pods.
-    podController framework.ControllerInterface
+    podController cache.ControllerInterface

     // A store of PetSets, populated by the psController.
     psStore cache.StoreToPetSetLister
     // Watches changes to all PetSets.
-    psController *framework.Controller
+    psController *cache.Controller

     // A store of the 1 unhealthy pet blocking progress for a given ps
     blockingPetStore *unhealthyPetTracker
@@ -82,7 +81,7 @@ type PetSetController struct {
 }

 // NewPetSetController creates a new petset controller.
-func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
+func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
     eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
@@ -98,7 +97,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
         queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
     }

-    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+    podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
         // lookup the petset and enqueue
         AddFunc: psc.addPod,
         // lookup current and old petset if labels changed
@@ -109,7 +108,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
     psc.podStore.Indexer = podInformer.GetIndexer()
     psc.podController = podInformer.GetController()

-    psc.psStore.Store, psc.psController = framework.NewInformer(
+    psc.psStore.Store, psc.psController = cache.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
@@ -120,7 +119,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
         },
         &apps.PetSet{},
         petSetResyncPeriod,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: psc.enqueuePetSet,
             UpdateFunc: func(old, cur interface{}) {
                 oldPS := old.(*apps.PetSet)
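
This hunk (the symbols suggest the PetSet controller) shows the other half of the migration: controllers that consume a shared pod informer keep doing so, only the type names move. The pattern, condensed into a sketch; the handler funcs are stand-ins, and the 1.4 cache.SharedIndexInformer interface is assumed:

    package main

    import (
        kcache "k8s.io/kubernetes/pkg/client/cache"
    )

    // shareInformer registers event handlers on an informer shared with other
    // controllers, then borrows its indexer and controller instead of opening
    // a second watch against the API server.
    func shareInformer(
        podInformer kcache.SharedIndexInformer,
        add, del func(obj interface{}),
        update func(old, cur interface{}),
    ) (kcache.Indexer, kcache.ControllerInterface) {
        podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
            AddFunc:    add,
            UpdateFunc: update,
            DeleteFunc: del,
        })
        return podInformer.GetIndexer(), podInformer.GetController()
    }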

@@ -33,7 +33,6 @@ import (
     unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
     unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
     "k8s.io/kubernetes/pkg/client/record"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
     "k8s.io/kubernetes/pkg/runtime"
     utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -61,14 +60,14 @@ type HorizontalController struct {
     // A store of HPA objects, populated by the controller.
     store cache.Store
     // Watches changes to all HPA objects.
-    controller *framework.Controller
+    controller *cache.Controller
 }

 var downscaleForbiddenWindow = 5 * time.Minute
 var upscaleForbiddenWindow = 3 * time.Minute

-func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *framework.Controller) {
-    return framework.NewInformer(
+func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) {
+    return cache.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
@@ -79,7 +78,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
         },
         &autoscaling.HorizontalPodAutoscaler{},
         resyncPeriod,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: func(obj interface{}) {
                 hpa := obj.(*autoscaling.HorizontalPodAutoscaler)
                 hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil
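
The autoscaler's newInformer keeps its (cache.Store, *cache.Controller) return shape; only the package changes. Callers consume the pair the same way as before: start the controller, then read from the store once it has filled. A sketch of that consumption, assuming the 1.4 Controller API (Run plus HasSynced); the polling loop is simplified for brevity:

    package main

    import (
        "time"

        kcache "k8s.io/kubernetes/pkg/client/cache"
    )

    // runUntilSynced starts an informer controller in the background and
    // blocks until its store has completed the initial list. The caller owns
    // the returned stop channel and closes it on shutdown.
    func runUntilSynced(c *kcache.Controller) chan struct{} {
        stopCh := make(chan struct{})
        go c.Run(stopCh)
        for !c.HasSynced() {
            time.Sleep(100 * time.Millisecond) // real controllers poll via wait utilities
        }
        return stopCh
    }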

vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go (6 changes, generated, vendored)
@@ -123,8 +123,8 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
     requestSum := int64(0)
     missing := false
     for _, pod := range podList.Items {
-        if pod.Status.Phase == api.PodPending {
-            // Skip pending pods.
+        if pod.Status.Phase != api.PodRunning {
+            // Count only running pods.
             continue
         }

@@ -144,7 +144,7 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
         return 0, 0, time.Time{}, fmt.Errorf("some pods do not have request for cpu")
     }
     glog.V(4).Infof("%s %s - sum of CPU requested: %d", namespace, selector, requestSum)
-    requestAvg := requestSum / int64(len(podList.Items))
+    requestAvg := requestSum / int64(len(podNames))
     // Consumption is already averaged and in millis.
     consumption, timestamp, err := h.getCpuUtilizationForPods(namespace, selector, podNames)
     if err != nil {
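
Unlike the mechanical renames, this file carries a behavioral fix to the Heapster metrics client: pods now contribute to the CPU request sum only if they are actually Running (previously only Pending pods were skipped, so Succeeded and Failed pods could dilute the figures), and the average divides by len(podNames), the pods actually counted, rather than by every pod returned by the list. A self-contained sketch of the corrected arithmetic, with hypothetical types standing in for the pod list:

    package main

    import "fmt"

    type podPhase string

    const podRunning podPhase = "Running"

    type podInfo struct {
        Phase      podPhase
        CPURequest int64 // millicores
    }

    // averageCPURequest mirrors the fixed logic: only running pods contribute,
    // and the divisor is the number of pods that actually contributed, not the
    // length of the original list.
    func averageCPURequest(pods []podInfo) (int64, error) {
        var sum, counted int64
        for _, p := range pods {
            if p.Phase != podRunning {
                continue // count only running pods
            }
            sum += p.CPURequest
            counted++
        }
        if counted == 0 {
            return 0, fmt.Errorf("no running pods to average over")
        }
        return sum / counted, nil
    }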

@@ -25,7 +25,6 @@ import (
     "k8s.io/kubernetes/pkg/client/cache"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/runtime"
@@ -44,7 +43,7 @@ const (
 type PodGCController struct {
     kubeClient clientset.Interface
     podStore cache.StoreToPodLister
-    podStoreSyncer *framework.Controller
+    podStoreSyncer *cache.Controller
     deletePod func(namespace, name string) error
     threshold int
 }
@@ -63,7 +62,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun

     terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

-    gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer(
+    gcc.podStore.Indexer, gcc.podStoreSyncer = cache.NewIndexerInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 options.FieldSelector = terminatedSelector
@@ -76,7 +75,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
         },
         &api.Pod{},
         resyncPeriod(),
-        framework.ResourceEventHandlerFuncs{},
+        cache.ResourceEventHandlerFuncs{},
         // We don't need to build a index for podStore here actually, but build one for consistency.
         // It will ensure that if people start making use of the podStore in more specific ways,
         // they'll get the benefits they expect. It will also reserve the name for future refactorings.
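
The pod GC controller's informer is scoped server-side: its field selector excludes Pending, Running, and Unknown pods, so only terminated (Succeeded or Failed) pods ever reach the store, which keeps both the cache and the watch traffic limited to GC candidates. A sketch of that selector on its own, using the same pkg/fields API as the hunk above:

    package main

    import (
        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/fields"
    )

    // terminatedPodsSelector reproduces the GC controller's selector:
    // everything that is no longer Pending, Running, or Unknown.
    func terminatedPodsSelector() fields.Selector {
        return fields.ParseSelectorOrDie(
            "status.phase!=" + string(api.PodPending) +
                ",status.phase!=" + string(api.PodRunning) +
                ",status.phase!=" + string(api.PodUnknown))
    }

    // Applied inside a ListWatch, as in the hunk above:
    //
    //  ListFunc: func(options api.ListOptions) (runtime.Object, error) {
    //      options.FieldSelector = terminatedPodsSelector()
    //      return kubeClient.Core().Pods(api.NamespaceAll).List(options)
    //  },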

@@ -36,8 +36,7 @@ import (
     unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
-    "k8s.io/kubernetes/pkg/controller/framework/informers"
+    "k8s.io/kubernetes/pkg/controller/informers"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/runtime"
     utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -81,7 +80,7 @@ type ReplicaSetController struct {
     // we have a personal informer, we must start it ourselves. If you start
     // the controller using NewReplicationManager(passing SharedInformer), this
     // will be null
-    internalPodInformer framework.SharedIndexInformer
+    internalPodInformer cache.SharedIndexInformer

     // A ReplicaSet is temporarily suspended after creating/deleting these many replicas.
     // It resumes normal action after observing the watch events for them.
@@ -95,11 +94,11 @@ type ReplicaSetController struct {
     // A store of ReplicaSets, populated by the rsController
     rsStore cache.StoreToReplicaSetLister
     // Watches changes to all ReplicaSets
-    rsController *framework.Controller
+    rsController *cache.Controller
     // A store of pods, populated by the podController
     podStore cache.StoreToPodLister
     // Watches changes to all pods
-    podController framework.ControllerInterface
+    podController cache.ControllerInterface
     // podStoreSynced returns true if the pod store has been synced at least once.
     // Added as a member to the struct to allow injection for testing.
     podStoreSynced func() bool
@@ -115,7 +114,7 @@ type ReplicaSetController struct {
 }

 // NewReplicaSetController creates a new ReplicaSetController.
-func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
+func NewReplicaSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
     eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@@ -126,7 +125,7 @@ func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClie
 }

 // newReplicaSetController configures a replica set controller with the specified event recorder
-func newReplicaSetController(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
+func newReplicaSetController(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
     if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
         metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
     }
@@ -143,7 +142,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
         garbageCollectorEnabled: garbageCollectorEnabled,
     }

-    rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
+    rsc.rsStore.Store, rsc.rsController = cache.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@@ -155,7 +154,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
         &extensions.ReplicaSet{},
         // TODO: Can we have much longer period here?
         FullControllerResyncPeriod,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: rsc.enqueueReplicaSet,
             UpdateFunc: rsc.updateRS,
             // This will enter the sync loop and no-op, because the replica set has been deleted from the store.
@@ -165,7 +164,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
         },
     )

-    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+    podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
         AddFunc: rsc.addPod,
         // This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like
         // overkill the most frequent pod update is status, and the associated ReplicaSet will only list from

@@ -34,8 +34,7 @@ import (
     unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
-    "k8s.io/kubernetes/pkg/controller/framework/informers"
+    "k8s.io/kubernetes/pkg/controller/informers"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util"
@@ -86,7 +85,7 @@ type ReplicationManager struct {
     // we have a personal informer, we must start it ourselves. If you start
     // the controller using NewReplicationManager(passing SharedInformer), this
     // will be null
-    internalPodInformer framework.SharedIndexInformer
+    internalPodInformer cache.SharedIndexInformer

     // An rc is temporarily suspended after creating/deleting these many replicas.
     // It resumes normal action after observing the watch events for them.
@@ -100,11 +99,11 @@ type ReplicationManager struct {
     // A store of replication controllers, populated by the rcController
     rcStore cache.StoreToReplicationControllerLister
     // Watches changes to all replication controllers
-    rcController *framework.Controller
+    rcController *cache.Controller
     // A store of pods, populated by the podController
     podStore cache.StoreToPodLister
     // Watches changes to all pods
-    podController framework.ControllerInterface
+    podController cache.ControllerInterface
     // podStoreSynced returns true if the pod store has been synced at least once.
     // Added as a member to the struct to allow injection for testing.
     podStoreSynced func() bool
@@ -120,7 +119,7 @@ type ReplicationManager struct {
 }

 // NewReplicationManager creates a replication manager
-func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
+func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
     eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@@ -130,7 +129,7 @@ func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient
 }

 // newReplicationManager configures a replication manager with the specified event recorder
-func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
+func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
     if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
         metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
     }
@@ -147,7 +146,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
         garbageCollectorEnabled: garbageCollectorEnabled,
     }

-    rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
+    rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@@ -159,7 +158,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
         &api.ReplicationController{},
         // TODO: Can we have much longer period here?
         FullControllerResyncPeriod,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: rm.enqueueController,
             UpdateFunc: rm.updateRC,
             // This will enter the sync loop and no-op, because the controller has been deleted from the store.
@@ -170,7 +169,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
         cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
     )

-    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+    podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
         AddFunc: rm.addPod,
         // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
         // the most frequent pod update is status, and the associated rc will only list from local storage, so

vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go (37 changes, generated, vendored)
@@ -27,8 +27,7 @@ import (
     "k8s.io/kubernetes/pkg/client/cache"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
-    "k8s.io/kubernetes/pkg/controller/framework/informers"
+    "k8s.io/kubernetes/pkg/controller/informers"
     "k8s.io/kubernetes/pkg/quota/evaluator/core"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/metrics"
@@ -90,18 +89,18 @@ func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func
 type ReplenishmentControllerFactory interface {
     // NewController returns a controller configured with the specified options.
     // This method is NOT thread-safe.
-    NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error)
+    NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error)
 }

 // replenishmentControllerFactory implements ReplenishmentControllerFactory
 type replenishmentControllerFactory struct {
     kubeClient clientset.Interface
-    podInformer framework.SharedInformer
+    podInformer cache.SharedInformer
 }

 // NewReplenishmentControllerFactory returns a factory that knows how to build controllers
 // to replenish resources when updated or deleted
-func NewReplenishmentControllerFactory(podInformer framework.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
+func NewReplenishmentControllerFactory(podInformer cache.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
     return &replenishmentControllerFactory{
         kubeClient: kubeClient,
         podInformer: podInformer,
@@ -112,8 +111,8 @@ func NewReplenishmentControllerFactoryFromClient(kubeClient clientset.Interface)
     return NewReplenishmentControllerFactory(nil, kubeClient)
 }

-func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) {
-    var result framework.ControllerInterface
+func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
+    var result cache.ControllerInterface
     if r.kubeClient != nil && r.kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
         metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().GetRESTClient().GetRateLimiter())
     }
@@ -121,7 +120,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
     switch options.GroupKind {
     case api.Kind("Pod"):
         if r.podInformer != nil {
-            r.podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+            r.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
                 UpdateFunc: PodReplenishmentUpdateFunc(options),
                 DeleteFunc: ObjectReplenishmentDeleteFunc(options),
             })
@@ -133,7 +132,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
         result = r.podInformer

     case api.Kind("Service"):
-        _, result = framework.NewInformer(
+        _, result = cache.NewInformer(
            &cache.ListWatch{
                 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                     return r.kubeClient.Core().Services(api.NamespaceAll).List(options)
@@ -144,13 +143,13 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
             },
             &api.Service{},
             options.ResyncPeriod(),
-            framework.ResourceEventHandlerFuncs{
+            cache.ResourceEventHandlerFuncs{
                 UpdateFunc: ServiceReplenishmentUpdateFunc(options),
                 DeleteFunc: ObjectReplenishmentDeleteFunc(options),
             },
         )
     case api.Kind("ReplicationController"):
-        _, result = framework.NewInformer(
+        _, result = cache.NewInformer(
             &cache.ListWatch{
                 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                     return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@@ -161,12 +160,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
             },
             &api.ReplicationController{},
             options.ResyncPeriod(),
-            framework.ResourceEventHandlerFuncs{
+            cache.ResourceEventHandlerFuncs{
                 DeleteFunc: ObjectReplenishmentDeleteFunc(options),
             },
         )
     case api.Kind("PersistentVolumeClaim"):
-        _, result = framework.NewInformer(
+        _, result = cache.NewInformer(
             &cache.ListWatch{
                 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                     return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
@@ -177,12 +176,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
             },
             &api.PersistentVolumeClaim{},
             options.ResyncPeriod(),
-            framework.ResourceEventHandlerFuncs{
+            cache.ResourceEventHandlerFuncs{
                 DeleteFunc: ObjectReplenishmentDeleteFunc(options),
             },
         )
     case api.Kind("Secret"):
-        _, result = framework.NewInformer(
+        _, result = cache.NewInformer(
             &cache.ListWatch{
                 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                     return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options)
@@ -193,12 +192,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
             },
             &api.Secret{},
             options.ResyncPeriod(),
-            framework.ResourceEventHandlerFuncs{
+            cache.ResourceEventHandlerFuncs{
                 DeleteFunc: ObjectReplenishmentDeleteFunc(options),
             },
         )
     case api.Kind("ConfigMap"):
-        _, result = framework.NewInformer(
+        _, result = cache.NewInformer(
             &cache.ListWatch{
                 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                     return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options)
@@ -209,7 +208,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
             },
             &api.ConfigMap{},
             options.ResyncPeriod(),
-            framework.ResourceEventHandlerFuncs{
+            cache.ResourceEventHandlerFuncs{
                 DeleteFunc: ObjectReplenishmentDeleteFunc(options),
             },
         )
@@ -254,7 +253,7 @@ func IsUnhandledGroupKindError(err error) bool {
 // returning the first success or failure it hits. If there are no hits either way, it return an UnhandledGroupKind error
 type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory

-func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) {
+func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
     for _, factory := range f {
         controller, err := factory.NewController(options)
         if !IsUnhandledGroupKindError(err) {
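
The factory's reason for returning cache.ControllerInterface (previously framework.ControllerInterface) is visible in the Pod case: sometimes the result is a shared informer, sometimes a freshly built cache.Controller, and the quota controller must drive both the same way. A sketch of that uniform treatment, assuming the 1.4 interface shape (Run taking a stop channel):

    package main

    import (
        kcache "k8s.io/kubernetes/pkg/client/cache"
    )

    // runReplenishmentControllers starts every replenishment watcher the
    // factory produced. Shared informers and private controllers both satisfy
    // cache.ControllerInterface, so the caller does not care which it got.
    func runReplenishmentControllers(controllers []kcache.ControllerInterface, stopCh <-chan struct{}) {
        for _, c := range controllers {
            go c.Run(stopCh)
        }
    }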

vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go (11 changes, generated, vendored)
@@ -26,7 +26,6 @@ import (
     "k8s.io/kubernetes/pkg/client/cache"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/quota"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/metrics"
@@ -60,7 +59,7 @@ type ResourceQuotaController struct {
     // An index of resource quota objects by namespace
     rqIndexer cache.Indexer
     // Watches changes to all resource quota
-    rqController *framework.Controller
+    rqController *cache.Controller
     // ResourceQuota objects that need to be synchronized
     queue workqueue.RateLimitingInterface
     // missingUsageQueue holds objects that are missing the initial usage informatino
@@ -72,7 +71,7 @@ type ResourceQuotaController struct {
     // knows how to calculate usage
     registry quota.Registry
     // controllers monitoring to notify for replenishment
-    replenishmentControllers []framework.ControllerInterface
+    replenishmentControllers []cache.ControllerInterface
 }

 func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
@@ -83,7 +82,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
         missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
         resyncPeriod: options.ResyncPeriod,
         registry: options.Registry,
-        replenishmentControllers: []framework.ControllerInterface{},
+        replenishmentControllers: []cache.ControllerInterface{},
     }
     if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
         metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter())
@@ -92,7 +91,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
     rq.syncHandler = rq.syncResourceQuotaFromKey

     // build the controller that observes quota
-    rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
+    rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
@@ -103,7 +102,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
         },
         &api.ResourceQuota{},
         rq.resyncPeriod(),
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: rq.addQuota,
             UpdateFunc: func(old, cur interface{}) {
                 // We are only interested in observing updates to quota.spec to drive updates to quota.status.

@@ -80,7 +80,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
     if err != nil {
         return fmt.Errorf("error listing routes: %v", err)
     }
-    // TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
+    // TODO (cjcullen): use pkg/controller/cache.NewInformer to watch this
     // and reduce the number of lists needed.
     nodeList, err := rc.kubeClient.Core().Nodes().List(api.ListOptions{})
     if err != nil {

@@ -33,7 +33,6 @@ import (
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/fields"
     pkg_runtime "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/metrics"
@@ -88,7 +87,7 @@ type ServiceController struct {
     // A store of services, populated by the serviceController
     serviceStore cache.StoreToServiceLister
     // Watches changes to all services
-    serviceController *framework.Controller
+    serviceController *cache.Controller
     eventBroadcaster record.EventBroadcaster
     eventRecorder record.EventRecorder
     nodeLister cache.StoreToNodeLister
@@ -120,7 +119,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
         },
         workingQueue: workqueue.NewDelayingQueue(),
     }
-    s.serviceStore.Store, s.serviceController = framework.NewInformer(
+    s.serviceStore.Store, s.serviceController = cache.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
                 return s.kubeClient.Core().Services(api.NamespaceAll).List(options)
@@ -131,7 +130,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
         },
         &api.Service{},
         serviceSyncPeriod,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: s.enqueueService,
             UpdateFunc: func(old, cur interface{}) {
                 oldSvc, ok1 := old.(*api.Service)

vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go (13 changes, generated, vendored)
@@ -26,7 +26,6 @@ import (
     "k8s.io/kubernetes/pkg/api/meta"
     "k8s.io/kubernetes/pkg/client/cache"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/metrics"
@@ -80,7 +79,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
         // If we're maintaining a single account, we can scope the accounts we watch to just that name
         accountSelector = fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ServiceAccounts[0].Name})
     }
-    e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
+    e.serviceAccounts, e.serviceAccountController = cache.NewIndexerInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 options.FieldSelector = accountSelector
@@ -93,13 +92,13 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
         },
         &api.ServiceAccount{},
         options.ServiceAccountResync,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             DeleteFunc: e.serviceAccountDeleted,
         },
         cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
     )

-    e.namespaces, e.namespaceController = framework.NewIndexerInformer(
+    e.namespaces, e.namespaceController = cache.NewIndexerInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return e.client.Core().Namespaces().List(options)
@@ -110,7 +109,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
         },
         &api.Namespace{},
         options.NamespaceResync,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: e.namespaceAdded,
             UpdateFunc: e.namespaceUpdated,
         },
@@ -131,8 +130,8 @@ type ServiceAccountsController struct {
     namespaces cache.Indexer

     // Since we join two objects, we'll watch both of them with controllers.
-    serviceAccountController *framework.Controller
-    namespaceController *framework.Controller
+    serviceAccountController *cache.Controller
+    namespaceController *cache.Controller
 }

 // Runs controller loops and returns immediately
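
cache.NewIndexerInformer returns an Indexer rather than a plain Store, and the hunk above registers a namespace index for it. That index is what makes the ServiceAccount-to-Namespace join cheap: on a namespace event the controller can pull every ServiceAccount in that namespace with one indexed lookup. A sketch of the read side, assuming the 1.4 Indexer.ByIndex API; the indexer is the one returned alongside the controller above:

    package main

    import (
        "k8s.io/kubernetes/pkg/api"
        kcache "k8s.io/kubernetes/pkg/client/cache"
    )

    // serviceAccountsInNamespace looks up all ServiceAccounts in a namespace
    // via the "namespace" index built by MetaNamespaceIndexFunc.
    func serviceAccountsInNamespace(indexer kcache.Indexer, ns string) ([]*api.ServiceAccount, error) {
        objs, err := indexer.ByIndex("namespace", ns)
        if err != nil {
            return nil, err
        }
        accounts := make([]*api.ServiceAccount, 0, len(objs))
        for _, o := range objs {
            accounts = append(accounts, o.(*api.ServiceAccount))
        }
        return accounts, nil
    }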

@@ -27,7 +27,6 @@ import (
     "k8s.io/kubernetes/pkg/client/cache"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     client "k8s.io/kubernetes/pkg/client/unversioned"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/registry/secret"
     "k8s.io/kubernetes/pkg/runtime"
@@ -90,7 +89,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
         metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
     }

-    e.serviceAccounts, e.serviceAccountController = framework.NewInformer(
+    e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
@@ -101,7 +100,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
         },
         &api.ServiceAccount{},
         options.ServiceAccountResync,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: e.queueServiceAccountSync,
             UpdateFunc: e.queueServiceAccountUpdateSync,
             DeleteFunc: e.queueServiceAccountSync,
@@ -109,7 +108,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
     )

     tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
-    e.secrets, e.secretController = framework.NewIndexerInformer(
+    e.secrets, e.secretController = cache.NewIndexerInformer(
         &cache.ListWatch{
             ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                 options.FieldSelector = tokenSelector
@@ -122,7 +121,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
         },
         &api.Secret{},
         options.SecretResync,
-        framework.ResourceEventHandlerFuncs{
+        cache.ResourceEventHandlerFuncs{
             AddFunc: e.queueSecretSync,
             UpdateFunc: e.queueSecretUpdateSync,
             DeleteFunc: e.queueSecretSync,
@@ -144,8 +143,8 @@ type TokensController struct {
     secrets cache.Indexer

     // Since we join two objects, we'll watch both of them with controllers.
-    serviceAccountController *framework.Controller
-    secretController *framework.Controller
+    serviceAccountController *cache.Controller
+    secretController *cache.Controller

     // syncServiceAccountQueue handles service account events:
     // * ensures a referenced token exists for service accounts which still exist

vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go (18 changes, generated, vendored)
@@ -25,10 +25,10 @@ import (

     "github.com/golang/glog"
     "k8s.io/kubernetes/pkg/api"
+    kcache "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/cloudprovider"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
     "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
     "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
@@ -66,10 +66,10 @@ type AttachDetachController interface {
 // NewAttachDetachController returns a new instance of AttachDetachController.
 func NewAttachDetachController(
     kubeClient internalclientset.Interface,
-    podInformer framework.SharedInformer,
-    nodeInformer framework.SharedInformer,
-    pvcInformer framework.SharedInformer,
-    pvInformer framework.SharedInformer,
+    podInformer kcache.SharedInformer,
+    nodeInformer kcache.SharedInformer,
+    pvcInformer kcache.SharedInformer,
+    pvInformer kcache.SharedInformer,
     cloud cloudprovider.Interface,
     plugins []volume.VolumePlugin,
     recorder record.EventRecorder) (AttachDetachController, error) {
@@ -94,13 +94,13 @@ func NewAttachDetachController(
         cloud: cloud,
     }

-    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+    podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
         AddFunc: adc.podAdd,
         UpdateFunc: adc.podUpdate,
         DeleteFunc: adc.podDelete,
     })

-    nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+    nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
         AddFunc: adc.nodeAdd,
         UpdateFunc: adc.nodeUpdate,
         DeleteFunc: adc.nodeDelete,
@@ -143,12 +143,12 @@ type attachDetachController struct {
     // pvcInformer is the shared PVC informer used to fetch and store PVC
     // objects from the API server. It is shared with other controllers and
     // therefore the PVC objects in its store should be treated as immutable.
-    pvcInformer framework.SharedInformer
+    pvcInformer kcache.SharedInformer

     // pvInformer is the shared PV informer used to fetch and store PV objects
     // from the API server. It is shared with other controllers and therefore
     // the PV objects in its store should be treated as immutable.
-    pvInformer framework.SharedInformer
+    pvInformer kcache.SharedInformer

     // cloud provider used by volume host
     cloud cloudprovider.Interface
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go (generated, vendored)

@@ -66,12 +66,12 @@ type ActualStateOfWorld interface {
 	// the specified volume, an error is returned.
 	SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName string, mounted bool) error

-	// ResetNodeStatusUpdateNeeded resets statusUpdateNeeded for the specified
-	// node to false indicating the AttachedVolume field of the Node's Status
-	// object has been updated.
-	// If no node with the name nodeName exists in list of attached nodes for
-	// the specified volume, an error is returned.
-	ResetNodeStatusUpdateNeeded(nodeName string) error
+	// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
+	// node to true indicating the AttachedVolume field in the Node's Status
+	// object needs to be updated by the node updater again.
+	// If the specifed node does not exist in the nodesToUpdateStatusFor list,
+	// log the error and return
+	SetNodeStatusUpdateNeeded(nodeName string)

 	// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
 	// request any more for the volume
@@ -278,8 +278,17 @@ func (asw *actualStateOfWorld) AddVolumeNode(
 			nodesAttachedTo: make(map[string]nodeAttachedTo),
 			devicePath:      devicePath,
 		}
-		asw.attachedVolumes[volumeName] = volumeObj
+	} else {
+		// If volume object already exists, it indicates that the information would be out of date.
+		// Update the fields for volume object except the nodes attached to the volumes.
+		volumeObj.devicePath = devicePath
+		volumeObj.spec = volumeSpec
+		glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
+			volumeName,
+			nodeName,
+			devicePath)
 	}
+	asw.attachedVolumes[volumeName] = volumeObj

 	_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
 	if !nodeExists {
@@ -322,7 +331,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
 	nodeObj.mountedByNode = mounted
 	volumeObj.nodesAttachedTo[nodeName] = nodeObj
-	glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %q",
+	glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
 		volumeName,
 		nodeName,
 		mounted)
@@ -433,21 +442,28 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
 	}
 }

-func (asw *actualStateOfWorld) ResetNodeStatusUpdateNeeded(
-	nodeName string) error {
-	asw.Lock()
-	defer asw.Unlock()
-	// Remove volume from volumes to report as attached
+// Update the flag statusUpdateNeeded to indicate whether node status is already updated or
+// needs to be updated again by the node status updater.
+// If the specifed node does not exist in the nodesToUpdateStatusFor list, log the error and return
+// This is an internal function and caller should acquire and release the lock
+func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, needed bool) {
 	nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
 	if !nodeToUpdateExists {
-		return fmt.Errorf(
-			"failed to ResetNodeStatusUpdateNeeded(nodeName=%q) nodeName does not exist",
+		// should not happen
+		glog.Errorf(
+			"Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist",
+			needed,
 			nodeName)
 	}

-	nodeToUpdate.statusUpdateNeeded = false
+	nodeToUpdate.statusUpdateNeeded = needed
 	asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
-	return nil
+}
+
+func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName string) {
+	asw.Lock()
+	defer asw.Unlock()
+	asw.updateNodeStatusUpdateNeeded(nodeName, true)
 }

 func (asw *actualStateOfWorld) DeleteVolumeNode(
@@ -529,7 +545,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.AttachedVolume {
 	defer asw.RUnlock()

 	volumesToReportAttached := make(map[string][]api.AttachedVolume)
-	for _, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
+	for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
 		if nodeToUpdateObj.statusUpdateNeeded {
 			attachedVolumes := make(
 				[]api.AttachedVolume,
@@ -544,6 +560,10 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.AttachedVolume {
 			}
 			volumesToReportAttached[nodeToUpdateObj.nodeName] = attachedVolumes
 		}
+		// When GetVolumesToReportAttached is called by node status updater, the current status
+		// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
+		// the current status is already updated.
+		asw.updateNodeStatusUpdateNeeded(nodeName, false)
 	}

 	return volumesToReportAttached
@@ -557,6 +577,7 @@ func getAttachedVolume(
 			VolumeName:         attachedVolume.volumeName,
 			VolumeSpec:         attachedVolume.spec,
 			NodeName:           nodeAttachedTo.nodeName,
+			DevicePath:         attachedVolume.devicePath,
 			PluginIsAttachable: true,
 		},
 		MountedByNode: nodeAttachedTo.mountedByNode,
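The statusUpdateNeeded bookkeeping above follows a common dirty-flag discipline: an unexported helper mutates the flag without locking, the exported setter takes the lock, and the reader clears the flag as a side effect of producing the report. A condensed, self-contained sketch of the same pattern (types and names here are illustrative, not the vendored ones):

    package sketch

    import (
        "log"
        "sync"
    )

    // dirtyTracker records which nodes need a status push. Writers mark a
    // node dirty; the reader that consumes the snapshot clears the mark, so
    // a failed push must re-mark the node to be retried.
    type dirtyTracker struct {
        sync.RWMutex
        dirty map[string]bool
    }

    // setNeeded is the internal helper; callers must hold the lock.
    func (t *dirtyTracker) setNeeded(node string, needed bool) {
        if _, ok := t.dirty[node]; !ok {
            log.Printf("node %q is not tracked", node) // log and continue, as above
        }
        t.dirty[node] = needed
    }

    // MarkNeeded is the exported, locked entry point (cf. SetNodeStatusUpdateNeeded).
    func (t *dirtyTracker) MarkNeeded(node string) {
        t.Lock()
        defer t.Unlock()
        t.setNeeded(node, true)
    }

    // Snapshot returns the dirty nodes and clears their flags
    // (cf. GetVolumesToReportAttached resetting statusUpdateNeeded).
    func (t *dirtyTracker) Snapshot() []string {
        t.Lock()
        defer t.Unlock()
        var out []string
        for node, needed := range t.dirty {
            if needed {
                out = append(out, node)
            }
            t.setNeeded(node, false)
        }
        return out
    }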
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go (generated, vendored)

@@ -25,7 +25,6 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	kcache "k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
@@ -48,7 +47,7 @@ type DesiredStateOfWorldPopulator interface {
 // desiredStateOfWorld - the cache to populate
 func NewDesiredStateOfWorldPopulator(
 	loopSleepDuration time.Duration,
-	podInformer framework.SharedInformer,
+	podInformer kcache.SharedInformer,
 	desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
 	return &desiredStateOfWorldPopulator{
 		loopSleepDuration: loopSleepDuration,
@@ -59,7 +58,7 @@ func NewDesiredStateOfWorldPopulator(
 type desiredStateOfWorldPopulator struct {
 	loopSleepDuration   time.Duration
-	podInformer         framework.SharedInformer
+	podInformer         kcache.SharedInformer
 	desiredStateOfWorld cache.DesiredStateOfWorld
 }
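Note that these components accept a kcache.SharedInformer rather than constructing their own watch: several controllers register handlers against one shared watch connection and one shared store. A sketch of consuming a shared informer (the callback is a placeholder):

    package sketch

    import (
        kcache "k8s.io/kubernetes/pkg/client/cache"
    )

    // register attaches this component's callback to an informer owned by
    // someone else; the informer keeps running and its store stays shared,
    // which is why objects read from it must be treated as immutable.
    func register(podInformer kcache.SharedInformer, onAdd func(interface{})) {
        podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
            AddFunc: onAdd,
        })
    }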
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go (generated, vendored)

@@ -25,8 +25,8 @@ import (
 	"github.com/golang/glog"

 	"k8s.io/kubernetes/pkg/api"
+	kcache "k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/util/strategicpatch"
 )
@@ -42,7 +42,7 @@ type NodeStatusUpdater interface {
 // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
 func NewNodeStatusUpdater(
 	kubeClient internalclientset.Interface,
-	nodeInformer framework.SharedInformer,
+	nodeInformer kcache.SharedInformer,
 	actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
 	return &nodeStatusUpdater{
 		actualStateOfWorld: actualStateOfWorld,
@@ -53,7 +53,7 @@ func NewNodeStatusUpdater(
 type nodeStatusUpdater struct {
 	kubeClient         internalclientset.Interface
-	nodeInformer       framework.SharedInformer
+	nodeInformer       kcache.SharedInformer
 	actualStateOfWorld cache.ActualStateOfWorld
 }
@@ -107,20 +107,15 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
 		_, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes)
 		if err != nil {
+			// If update node status fails, reset flag statusUpdateNeeded back to true
+			// to indicate this node status needs to be udpated again
+			nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
 			return fmt.Errorf(
 				"failed to kubeClient.Core().Nodes().Patch for node %q. %v",
 				nodeName,
 				err)
 		}

-		err = nsu.actualStateOfWorld.ResetNodeStatusUpdateNeeded(nodeName)
-		if err != nil {
-			return fmt.Errorf(
-				"failed to ResetNodeStatusUpdateNeeded for node %q. %v",
-				nodeName,
-				err)
-		}
-
 		glog.V(3).Infof(
 			"Updating status for node %q succeeded. patchBytes: %q",
 			nodeName,
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go (generated, vendored)

@@ -28,7 +28,6 @@ import (
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/conversion"
 	"k8s.io/kubernetes/pkg/util/goroutinemap"
 	vol "k8s.io/kubernetes/pkg/volume"
@@ -150,12 +149,12 @@ const createProvisionedPVInterval = 10 * time.Second

 // PersistentVolumeController is a controller that synchronizes
 // PersistentVolumeClaims and PersistentVolumes. It starts two
-// framework.Controllers that watch PersistentVolume and PersistentVolumeClaim
+// cache.Controllers that watch PersistentVolume and PersistentVolumeClaim
 // changes.
 type PersistentVolumeController struct {
-	volumeController *framework.Controller
+	volumeController *cache.Controller
 	volumeSource     cache.ListerWatcher
-	claimController  *framework.Controller
+	claimController  *cache.Controller
 	claimSource      cache.ListerWatcher
 	classReflector   *cache.Reflector
 	classSource      cache.ListerWatcher
@@ -191,7 +190,7 @@ type PersistentVolumeController struct {
 }

 // syncClaim is the main controller method to decide what to do with a claim.
-// It's invoked by appropriate framework.Controller callbacks when a claim is
+// It's invoked by appropriate cache.Controller callbacks when a claim is
 // created, updated or periodically synced. We do not differentiate between
 // these events.
 // For easier readability, it was split into syncUnboundClaim and syncBoundClaim
@@ -381,7 +380,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolumeClaim) error {
 }

 // syncVolume is the main controller method to decide what to do with a volume.
-// It's invoked by appropriate framework.Controller callbacks when a volume is
+// It's invoked by appropriate cache.Controller callbacks when a volume is
 // created, updated or periodically synced. We do not differentiate between
 // these events.
 func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error {
@@ -913,7 +912,6 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolume) error {
 	// Update the status
 	_, err = ctrl.updateVolumePhase(newVol, api.VolumeAvailable, "")
 	return err
-
 }

 // reclaimVolume implements volume.Spec.PersistentVolumeReclaimPolicy and
@@ -996,7 +994,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) {
 	}

 	// Plugin found
-	recycler, err := plugin.NewRecycler(volume.Name, spec)
+	recorder := ctrl.newRecyclerEventRecorder(volume)
+	recycler, err := plugin.NewRecycler(volume.Name, spec, recorder)
 	if err != nil {
 		// Cannot create recycler
 		strerr := fmt.Sprintf("Failed to create recycler: %v", err)
@@ -1024,6 +1023,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) {
 	}

 	glog.V(2).Infof("volume %q recycled", volume.Name)
+	// Send an event
+	ctrl.eventRecorder.Event(volume, api.EventTypeNormal, "VolumeRecycled", "Volume recycled")
 	// Make the volume available again
 	if err = ctrl.unbindVolume(volume); err != nil {
 		// Oops, could not save the volume and therefore the controller will
@@ -1366,6 +1367,17 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func() error) {
 	}
 }

+// newRecyclerEventRecorder returns a RecycleEventRecorder that sends all events
+// to given volume.
+func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *api.PersistentVolume) vol.RecycleEventRecorder {
+	return func(eventtype, message string) {
+		ctrl.eventRecorder.Eventf(volume, eventtype, "RecyclerPod", "Recycler pod: %s", message)
+	}
+}
+
+// findProvisionablePlugin finds a provisioner plugin for a given claim.
+// It returns either the provisioning plugin or nil when an external
+// provisioner is requested.
 func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *storage.StorageClass, error) {
 	// TODO: remove this alpha behavior in 1.5
 	alpha := hasAnnotation(claim.ObjectMeta, annAlphaClass)
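The newRecyclerEventRecorder helper above is a small adapter: instead of handing the recycler a full event recorder plus the PV object, the controller closes over the PV and exposes a plain function type. A standalone sketch of the pattern (the RecycleEventRecorder type is defined later in pkg/volume/util.go; the sink interface here is a simplified stand-in for the real event recorder):

    package sketch

    // RecycleEventRecorder is the function type the volume package defines
    // for recyclers to report progress (see pkg/volume/util.go in this diff).
    type RecycleEventRecorder func(eventtype, message string)

    // eventSink stands in for the controller's eventRecorder; illustrative only.
    type eventSink interface {
        Eventf(object interface{}, eventtype, reason, messageFmt string, args ...interface{})
    }

    // newRecyclerEventRecorder binds a specific volume into a closure, so the
    // recycler can emit events without knowing about PVs or the event API.
    func newRecyclerEventRecorder(sink eventSink, volume interface{}) RecycleEventRecorder {
        return func(eventtype, message string) {
            sink.Eventf(volume, eventtype, "RecyclerPod", "Recycler pod: %s", message)
        }
    }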
25	vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go (generated, vendored)

@@ -30,7 +30,6 @@ import (
 	unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/conversion"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/goroutinemap"
@@ -65,7 +64,7 @@ func NewPersistentVolumeController(

 	controller := &PersistentVolumeController{
 		volumes:           newPersistentVolumeOrderedIndex(),
-		claims:            cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
+		claims:            cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
 		kubeClient:        kubeClient,
 		eventRecorder:     eventRecorder,
 		runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
@@ -120,22 +119,22 @@ func NewPersistentVolumeController(
 	}
 	controller.classSource = classSource

-	_, controller.volumeController = framework.NewIndexerInformer(
+	_, controller.volumeController = cache.NewIndexerInformer(
 		volumeSource,
 		&api.PersistentVolume{},
 		syncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    controller.addVolume,
 			UpdateFunc: controller.updateVolume,
 			DeleteFunc: controller.deleteVolume,
 		},
 		cache.Indexers{"accessmodes": accessModesIndexFunc},
 	)
-	_, controller.claimController = framework.NewInformer(
+	_, controller.claimController = cache.NewInformer(
 		claimSource,
 		&api.PersistentVolumeClaim{},
 		syncPeriod,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    controller.addClaim,
 			UpdateFunc: controller.updateClaim,
 			DeleteFunc: controller.deleteClaim,
@@ -144,7 +143,7 @@ func NewPersistentVolumeController(

 	// This is just a cache of StorageClass instances, no special actions are
 	// needed when a class is created/deleted/updated.
-	controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
+	controller.classes = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
 	controller.classReflector = cache.NewReflector(
 		classSource,
 		&storage.StorageClass{},
@@ -212,7 +211,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVolumeClaim) (bool, error) {
 	return storeObjectUpdate(ctrl.claims, claim, "claim")
 }

-// addVolume is callback from framework.Controller watching PersistentVolume
+// addVolume is callback from cache.Controller watching PersistentVolume
 // events.
 func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
 	pv, ok := obj.(*api.PersistentVolume)
@@ -247,7 +246,7 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
 	}
 }

-// updateVolume is callback from framework.Controller watching PersistentVolume
+// updateVolume is callback from cache.Controller watching PersistentVolume
 // events.
 func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
 	newVolume, ok := newObj.(*api.PersistentVolume)
@@ -282,7 +281,7 @@ func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
 	}
 }

-// deleteVolume is callback from framework.Controller watching PersistentVolume
+// deleteVolume is callback from cache.Controller watching PersistentVolume
 // events.
 func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
 	_ = ctrl.volumes.store.Delete(obj)
@@ -330,7 +329,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
 	}
 }

-// addClaim is callback from framework.Controller watching PersistentVolumeClaim
+// addClaim is callback from cache.Controller watching PersistentVolumeClaim
 // events.
 func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
 	// Store the new claim version in the cache and do not process it if this is
@@ -360,7 +359,7 @@ func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
 	}
 }

-// updateClaim is callback from framework.Controller watching PersistentVolumeClaim
+// updateClaim is callback from cache.Controller watching PersistentVolumeClaim
 // events.
 func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
 	// Store the new claim version in the cache and do not process it if this is
@@ -390,7 +389,7 @@ func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
 	}
 }

-// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim
+// deleteClaim is callback from cache.Controller watching PersistentVolumeClaim
 // events.
 func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
 	_ = ctrl.claims.Delete(obj)
vendor/k8s.io/kubernetes/pkg/genericapiserver/genericapiserver.go (generated, vendored)

@@ -502,6 +502,7 @@ func (s *GenericAPIServer) init(c *Config) {

 	attributeGetter := apiserver.NewRequestAttributeGetter(s.RequestContextMapper, s.NewRequestInfoResolver())
 	handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, s.authorizer)
+	handler = apiserver.WithImpersonation(handler, s.RequestContextMapper, s.authorizer)
 	if len(c.AuditLogPath) != 0 {
 		// audit handler must comes before the impersonationFilter to read the original user
 		writer := &lumberjack.Logger{
@@ -511,9 +512,7 @@ func (s *GenericAPIServer) init(c *Config) {
 			MaxSize:    c.AuditLogMaxSize,
 		}
 		handler = audit.WithAudit(handler, attributeGetter, writer)
-		defer writer.Close()
 	}
-	handler = apiserver.WithImpersonation(handler, s.RequestContextMapper, s.authorizer)

 	// Install Authenticator
 	if c.Authenticator != nil {
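The two hunks above are one logical move: impersonation used to be applied after the audit block, which made it the outer wrapper, so audit logged the impersonated user. Handler chains compose inside-out, so the filter applied last runs first on each request; applying WithImpersonation before the audit block makes audit the outermost layer and lets it record the original identity. A generic illustration with net/http (the handler names are invented):

    package sketch

    import (
        "log"
        "net/http"
    )

    // wrap returns h wrapped in a logging middleware. Middlewares compose
    // inside-out: the one applied LAST wraps everything applied before it,
    // and therefore runs FIRST on each request.
    func wrap(h http.Handler, label string) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            log.Printf("enter %s", label)
            h.ServeHTTP(w, r)
        })
    }

    func buildChain(core http.Handler) http.Handler {
        h := core
        h = wrap(h, "impersonation") // applied first, runs second
        h = wrap(h, "audit")         // applied last, runs first: sees the original user
        return h
    }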
vendor/k8s.io/kubernetes/pkg/genericapiserver/options/server_run_options.go (generated, vendored)

@@ -38,8 +38,6 @@ import (
 )

 const (
-	DefaultDeserializationCacheSize = 50000
-
 	// TODO: This can be tightened up. It still matches objects named watch or proxy.
 	defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)"
 )
@@ -157,7 +155,9 @@ func NewServerRunOptions() *ServerRunOptions {
 func (o *ServerRunOptions) WithEtcdOptions() *ServerRunOptions {
 	o.StorageConfig = storagebackend.Config{
 		Prefix: DefaultEtcdPathPrefix,
-		DeserializationCacheSize: DefaultDeserializationCacheSize,
+		// Default cache size to 0 - if unset, its size will be set based on target
+		// memory usage.
+		DeserializationCacheSize: 0,
 	}
 	return o
 }
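Dropping the hard-coded 50000 default in favor of 0 turns the field into a sentinel: later startup code (not shown in this diff) derives a size from the server's target memory usage when the field is unset. A hypothetical shape of such a fallback, purely illustrative; the ratio below is invented and not Kubernetes' actual heuristic:

    package sketch

    // effectiveCacheSize returns the configured size, or derives one from a
    // target memory budget when the size is left at its zero value.
    func effectiveCacheSize(configured int, targetMemoryMB int) int {
        if configured != 0 {
            return configured // explicit setting always wins
        }
        size := targetMemoryMB * 50 // invented placeholder ratio
        if size < 1000 {
            size = 1000 // keep a sane floor for tiny deployments
        }
        return size
    }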
vendor/k8s.io/kubernetes/pkg/master/tunneler.go (generated, vendored)

@@ -46,14 +46,17 @@ type Tunneler interface {
 }

 type SSHTunneler struct {
+	// Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
+	// See: https://golang.org/pkg/sync/atomic/ for more information
+	lastSync       int64 // Seconds since Epoch
+	lastSSHKeySync int64 // Seconds since Epoch
+
 	SSHUser        string
 	SSHKeyfile     string
 	InstallSSHKey  InstallSSHKey
 	HealthCheckURL *url.URL

 	tunnels        *ssh.SSHTunnelList
-	lastSync       int64 // Seconds since Epoch
-	lastSSHKeySync int64 // Seconds since Epoch
 	lastSyncMetric prometheus.GaugeFunc
 	clock          clock.Clock
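The struct reshuffle above is the standard workaround for sync/atomic on 32-bit platforms: 64-bit atomic operations require 8-byte alignment, and Go only guarantees that for the first word of an allocated struct, so atomically-accessed int64 fields are hoisted to the top. A minimal illustration:

    package sketch

    import "sync/atomic"

    // tunnelState keeps its atomically-updated int64 fields first, so they
    // are 8-byte aligned even on 32-bit architectures (see the sync/atomic
    // package documentation).
    type tunnelState struct {
        lastSync       int64 // accessed with sync/atomic; must stay at the top
        lastSSHKeySync int64 // accessed with sync/atomic; must stay at the top

        sshUser    string // ordinary fields may follow in any order
        sshKeyfile string
    }

    func (s *tunnelState) touch(now int64) {
        atomic.StoreInt64(&s.lastSync, now)
    }

    func (s *tunnelState) last() int64 {
        return atomic.LoadInt64(&s.lastSync)
    }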
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go (generated, vendored)

@@ -158,9 +158,6 @@ const (
 	// Period for performing image garbage collection.
 	ImageGCPeriod = 5 * time.Minute

-	// maxImagesInStatus is the number of max images we store in image status.
-	maxImagesInNodeStatus = 50
-
 	// Minimum number of dead containers to keep in a pod
 	minDeadContainerInPod = 1
 )
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go (generated, vendored)

@@ -39,6 +39,15 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

+const (
+	// maxImagesInNodeStatus is the number of max images we store in image status.
+	maxImagesInNodeStatus = 50
+
+	// maxNamesPerImageInNodeStatus is max number of names per image stored in
+	// the node status.
+	maxNamesPerImageInNodeStatus = 5
+)
+
 // registerWithApiServer registers the node with the cluster master. It is safe
 // to call multiple times, but not concurrently (kl.registrationCompleted is
 // not locked).
@@ -501,8 +510,13 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
 	}

 	for _, image := range containerImages {
+		names := append(image.RepoDigests, image.RepoTags...)
+		// Report up to maxNamesPerImageInNodeStatus names per image.
+		if len(names) > maxNamesPerImageInNodeStatus {
+			names = names[0:maxNamesPerImageInNodeStatus]
+		}
 		imagesOnNode = append(imagesOnNode, api.ContainerImage{
-			Names:     append(image.RepoTags, image.RepoDigests...),
+			Names:     names,
 			SizeBytes: image.Size,
 		})
 	}
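The image-name capping above is a straightforward bound on API object size: combine the candidate names (digests first, so they survive the cut), then clip to a fixed maximum. The same idiom in isolation; copying into a fresh slice also avoids accidentally mutating the inputs' backing array via append:

    package sketch

    // boundedNames merges digests and tags and clips the result to max
    // entries, mirroring how the kubelet limits names per image in the
    // node status.
    func boundedNames(digests, tags []string, max int) []string {
        names := make([]string, 0, len(digests)+len(tags))
        names = append(names, digests...)
        names = append(names, tags...)
        if len(names) > max {
            names = names[:max]
        }
        return names
    }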
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go (generated, vendored)

@@ -40,6 +40,12 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
 	podVolumes := kl.volumeManager.GetMountedVolumesForPod(
 		volumetypes.UniquePodName(podUID))
 	for outerVolumeSpecName, volume := range podVolumes {
+		// TODO: volume.Mounter could be nil if volume object is recovered
+		// from reconciler's sync state process. PR 33616 will fix this problem
+		// to create Mounter object when recovering volume state.
+		if volume.Mounter == nil {
+			continue
+		}
 		volumesToReturn[outerVolumeSpecName] = volume.Mounter
 	}
9	vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go (generated, vendored)

@@ -366,8 +366,15 @@ func (asw *actualStateOfWorld) addVolume(
 			globallyMounted: false,
 			devicePath:      devicePath,
 		}
-		asw.attachedVolumes[volumeName] = volumeObj
+	} else {
+		// If volume object already exists, update the fields such as device path
+		volumeObj.devicePath = devicePath
+		volumeObj.spec = volumeSpec
+		glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q",
+			volumeName,
+			devicePath)
 	}
+	asw.attachedVolumes[volumeName] = volumeObj

 	return nil
 }
vendor/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher.go (generated, vendored)

@@ -79,6 +79,12 @@ func exceptKey(except string) includeFunc {

 // etcdWatcher converts a native etcd watch to a watch.Interface.
 type etcdWatcher struct {
+	// HighWaterMarks for performance debugging.
+	// Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
+	// See: https://golang.org/pkg/sync/atomic/ for more information
+	incomingHWM HighWaterMark
+	outgoingHWM HighWaterMark
+
 	encoding runtime.Codec
 	// Note that versioner is required for etcdWatcher to work correctly.
 	// There is no public constructor of it, so be careful when manipulating
@@ -108,10 +114,6 @@ type etcdWatcher struct {
 	// Injectable for testing. Send the event down the outgoing channel.
 	emit func(watch.Event)

-	// HighWaterMarks for performance debugging.
-	incomingHWM HighWaterMark
-	outgoingHWM HighWaterMark
-
 	cache etcdCache
 }
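The HighWaterMark fields above are running maxima updated with sync/atomic, which is why the same 32-bit alignment rule moves them to the top of the struct. A self-contained equivalent for illustration (a re-implementation of the idea; the vendored type's exact API may differ):

    package sketch

    import "sync/atomic"

    // highWaterMark tracks the largest value ever observed, lock-free.
    type highWaterMark int64

    // Update raises the mark to current if it is a new maximum and reports
    // whether it did; a CAS loop handles concurrent updaters.
    func (m *highWaterMark) Update(current int64) bool {
        for {
            old := atomic.LoadInt64((*int64)(m))
            if current <= old {
                return false
            }
            if atomic.CompareAndSwapInt64((*int64)(m), old, current) {
                return true
            }
        }
    }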
vendor/k8s.io/kubernetes/pkg/version/base.go (generated, vendored)

@@ -51,7 +51,7 @@ var (
 	// semantic version is a git hash, but the version itself is no
 	// longer the direct output of "git describe", but a slight
 	// translation to be semver compliant.
-	gitVersion   string = "v1.4.0+$Format:%h$"
+	gitVersion   string = "v1.4.1+$Format:%h$"
 	gitCommit    string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
 	gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"
vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go (generated, vendored)

@@ -46,7 +46,7 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
 type hostPathPlugin struct {
 	host volume.VolumeHost
 	// decouple creating Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing.
-	newRecyclerFunc    func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
+	newRecyclerFunc    func(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
 	newDeleterFunc     func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error)
 	newProvisionerFunc func(options volume.VolumeOptions, host volume.VolumeHost) (volume.Provisioner, error)
 	config             volume.VolumeConfig
@@ -112,8 +112,8 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
 	}}, nil
 }

-func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec) (volume.Recycler, error) {
-	return plugin.newRecyclerFunc(pvName, spec, plugin.host, plugin.config)
+func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
+	return plugin.newRecyclerFunc(pvName, spec, eventRecorder, plugin.host, plugin.config)
 }

 func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
@@ -142,7 +142,7 @@ func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
 	return volume.NewSpecFromVolume(hostPathVolume), nil
 }

-func newRecycler(pvName string, spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
+func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
 	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
 		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
 	}
@@ -154,6 +154,7 @@ func newRecycler(pvName string, spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
 		config:  config,
 		timeout: volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
 		pvName:  pvName,
+		eventRecorder: eventRecorder,
 	}, nil
 }
@@ -235,6 +236,7 @@ type hostPathRecycler struct {
 	timeout int64
 	volume.MetricsNil
 	pvName string
+	eventRecorder volume.RecycleEventRecorder
 }

 func (r *hostPathRecycler) GetPath() string {
@@ -253,7 +255,7 @@ func (r *hostPathRecycler) Recycle() error {
 			Path: r.path,
 		},
 	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient())
+	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
 }

 // hostPathProvisioner implements a Provisioner for the HostPath plugin
vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go (generated, vendored)

@@ -46,7 +46,7 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
 type nfsPlugin struct {
 	host volume.VolumeHost
 	// decouple creating recyclers by deferring to a function. Allows for easier testing.
-	newRecyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
+	newRecyclerFunc func(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
 	config volume.VolumeConfig
 }
@@ -132,8 +132,8 @@ func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
 	}}, nil
 }

-func (plugin *nfsPlugin) NewRecycler(pvName string, spec *volume.Spec) (volume.Recycler, error) {
-	return plugin.newRecyclerFunc(pvName, spec, plugin.host, plugin.config)
+func (plugin *nfsPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
+	return plugin.newRecyclerFunc(pvName, spec, eventRecorder, plugin.host, plugin.config)
 }

 func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
@@ -274,7 +274,7 @@ func (c *nfsUnmounter) TearDownAt(dir string) error {
 	return nil
 }

-func newRecycler(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) {
+func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) {
 	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil {
 		return nil, fmt.Errorf("spec.PersistentVolumeSource.NFS is nil")
 	}
@@ -286,6 +286,7 @@ func newRecycler(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) {
 		config:  volumeConfig,
 		timeout: volume.CalculateTimeoutForVolume(volumeConfig.RecyclerMinimumTimeout, volumeConfig.RecyclerTimeoutIncrement, spec.PersistentVolume),
 		pvName:  pvName,
+		eventRecorder: eventRecorder,
 	}, nil
 }
@@ -299,6 +300,7 @@ type nfsRecycler struct {
 	timeout int64
 	volume.MetricsNil
 	pvName string
+	eventRecorder volume.RecycleEventRecorder
 }

 func (r *nfsRecycler) GetPath() string {
@@ -318,7 +320,7 @@ func (r *nfsRecycler) Recycle() error {
 			Path: r.path,
 		},
 	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient())
+	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
 }

 func getVolumeSource(spec *volume.Spec) (*api.NFSVolumeSource, bool, error) {
vendor/k8s.io/kubernetes/pkg/volume/plugins.go (generated, vendored)

@@ -124,9 +124,12 @@ type PersistentVolumePlugin interface {
 // again to new claims
 type RecyclableVolumePlugin interface {
 	VolumePlugin
-	// NewRecycler creates a new volume.Recycler which knows how to reclaim
-	// this resource after the volume's release from a PersistentVolumeClaim
-	NewRecycler(pvName string, spec *Spec) (Recycler, error)
+	// NewRecycler creates a new volume.Recycler which knows how to reclaim this
+	// resource after the volume's release from a PersistentVolumeClaim. The
+	// recycler will use the provided recorder to write any events that might be
+	// interesting to user. It's expected that caller will pass these events to
+	// the PV being recycled.
+	NewRecycler(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) (Recycler, error)
 }

 // DeletableVolumePlugin is an extended interface of VolumePlugin and is used
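Both volume plugins above adapt to this interface change the same way: thread the recorder through the constructor into the recycler struct, then hand it to the pod-watching helper. A condensed sketch of a plugin-side implementation (the plugin type is illustrative, with local stand-ins for the pkg/volume types):

    package sketch

    // Local stand-ins for the pkg/volume types referenced by the interface.
    type (
        Spec                 struct{}
        Recycler             interface{ Recycle() error }
        RecycleEventRecorder func(eventtype, message string)
    )

    // myRecycler carries the recorder so Recycle() can report pod progress.
    type myRecycler struct {
        pvName        string
        eventRecorder RecycleEventRecorder
    }

    func (r *myRecycler) Recycle() error {
        // A real implementation would pass r.eventRecorder through to
        // RecycleVolumeByWatchingPodUntilCompletion, as host_path and nfs do.
        r.eventRecorder("Normal", "recycling started")
        return nil
    }

    // NewRecycler matches the widened RecyclableVolumePlugin signature: the
    // recorder now arrives from the PV controller alongside the name and spec.
    func NewRecycler(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) (Recycler, error) {
        return &myRecycler{pvName: pvName, eventRecorder: eventRecorder}, nil
    }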
vendor/k8s.io/kubernetes/pkg/volume/util.go (generated, vendored)

@@ -19,13 +19,10 @@ package volume
 import (
 	"fmt"
 	"reflect"
-	"time"

 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/watch"

 	"hash/fnv"
@@ -39,6 +36,8 @@ import (
 	"k8s.io/kubernetes/pkg/util/sets"
 )

+type RecycleEventRecorder func(eventtype, message string)
+
 // RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
 // Recyclers. This function will save the given Pod to the API and watch it
 // until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
@@ -52,8 +51,8 @@ import (
 // pod - the pod designed by a volume plugin to recycle the volume. pod.Name
 // will be overwritten with unique name based on PV.Name.
 // client - kube client for API operations.
-func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, kubeClient clientset.Interface) error {
-	return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient))
+func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
+	return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
 }

 // same as above func comments, except 'recyclerClient' is a narrower pod API
@@ -67,36 +66,63 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, recyclerClient recyclerClient) error {
 	pod.Name = "recycler-for-" + pvName
 	pod.GenerateName = ""

+	stopChannel := make(chan struct{})
+	defer close(stopChannel)
+	podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
+	if err != nil {
+		glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
+		return err
+	}
+
 	// Start the pod
-	_, err := recyclerClient.CreatePod(pod)
+	_, err = recyclerClient.CreatePod(pod)
 	if err != nil {
 		if errors.IsAlreadyExists(err) {
 			glog.V(5).Infof("old recycler pod %q found for volume", pod.Name)
 		} else {
-			return fmt.Errorf("Unexpected error creating recycler pod: %+v\n", err)
+			return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
 		}
 	}
 	defer recyclerClient.DeletePod(pod.Name, pod.Namespace)

-	// Now only the old pod or the new pod run. Watch it until it finishes.
-	stopChannel := make(chan struct{})
-	defer close(stopChannel)
-	nextPod := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
-
+	// Now only the old pod or the new pod run. Watch it until it finishes
+	// and send all events on the pod to the PV
 	for {
-		watchedPod := nextPod()
-		if watchedPod.Status.Phase == api.PodSucceeded {
-			// volume.Recycle() returns nil on success, else error
-			return nil
-		}
-		if watchedPod.Status.Phase == api.PodFailed {
-			// volume.Recycle() returns nil on success, else error
-			if watchedPod.Status.Message != "" {
-				return fmt.Errorf(watchedPod.Status.Message)
-			} else {
-				return fmt.Errorf("pod failed, pod.Status.Message unknown.")
+		event := <-podCh
+		switch event.Object.(type) {
+		case *api.Pod:
+			// POD changed
+			pod := event.Object.(*api.Pod)
+			glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
+			switch event.Type {
+			case watch.Added, watch.Modified:
+				if pod.Status.Phase == api.PodSucceeded {
+					// Recycle succeeded.
+					return nil
+				}
+				if pod.Status.Phase == api.PodFailed {
+					if pod.Status.Message != "" {
+						return fmt.Errorf(pod.Status.Message)
+					} else {
+						return fmt.Errorf("pod failed, pod.Status.Message unknown.")
+					}
+				}
+
+			case watch.Deleted:
+				return fmt.Errorf("recycler pod was deleted")
+
+			case watch.Error:
+				return fmt.Errorf("recycler pod watcher failed")
+			}
+
+		case *api.Event:
+			// Event received
+			podEvent := event.Object.(*api.Event)
+			glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
+			if event.Type == watch.Added {
+				recyclerClient.Event(podEvent.Type, podEvent.Message)
 			}
 		}
 	}
 }
@@ -106,15 +132,24 @@ type recyclerClient interface {
 	CreatePod(pod *api.Pod) (*api.Pod, error)
 	GetPod(name, namespace string) (*api.Pod, error)
 	DeletePod(name, namespace string) error
-	WatchPod(name, namespace string, stopChannel chan struct{}) func() *api.Pod
+	// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
+	// to close the reflector backing the watch. The caller is responsible for
+	// derring a close on the channel to stop the reflector.
+	WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
+	// Event sends an event to the volume that is being recycled.
+	Event(eventtype, message string)
 }

-func newRecyclerClient(client clientset.Interface) recyclerClient {
-	return &realRecyclerClient{client}
+func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
+	return &realRecyclerClient{
+		client,
+		recorder,
+	}
 }

 type realRecyclerClient struct {
 	client clientset.Interface
+	recorder RecycleEventRecorder
 }

 func (c *realRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
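The WatchPod rewrite in the next hunk replaces the reflector/FIFO plumbing with two raw watches (pod updates plus events about the pod) whose results are funneled into one channel. The fan-in itself is plain Go; a stripped-down version of the pattern:

    package sketch

    // merge fans two channels into one until stop closes or either source
    // closes, mirroring the goroutine inside the new WatchPod below.
    func merge(a, b <-chan string, stop <-chan struct{}) <-chan string {
        out := make(chan string)
        go func() {
            defer close(out)
            for {
                select {
                case <-stop:
                    return
                case v, ok := <-a:
                    if !ok {
                        return
                    }
                    out <- v
                case v, ok := <-b:
                    if !ok {
                        return
                    }
                    out <- v
                }
            }
        }()
        return out
    }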
@ -129,28 +164,60 @@ func (c *realRecyclerClient) DeletePod(name, namespace string) error {
|
||||||
return c.client.Core().Pods(namespace).Delete(name, nil)
|
return c.client.Core().Pods(namespace).Delete(name, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
|
func (c *realRecyclerClient) Event(eventtype, message string) {
|
||||||
// to close the reflector backing the watch. The caller is responsible for
|
c.recorder(eventtype, message)
|
||||||
// derring a close on the channel to stop the reflector.
|
|
||||||
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) func() *api.Pod {
|
|
||||||
fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
|
|
||||||
|
|
||||||
podLW := &cache.ListWatch{
|
|
||||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
|
||||||
options.FieldSelector = fieldSelector
|
|
||||||
return c.client.Core().Pods(namespace).List(options)
|
|
||||||
},
|
|
||||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
|
||||||
options.FieldSelector = fieldSelector
|
|
||||||
return c.client.Core().Pods(namespace).Watch(options)
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
|
|
||||||
cache.NewReflector(podLW, &api.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)
|
|
||||||
|
|
||||||
return func() *api.Pod {
|
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
|
||||||
return cache.Pop(queue).(*api.Pod)
|
podSelector, _ := fields.ParseSelector("metadata.name=" + name)
|
||||||
|
options := api.ListOptions{
|
||||||
|
FieldSelector: podSelector,
|
||||||
|
Watch: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
podWatch, err := c.client.Core().Pods(namespace).Watch(options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
|
||||||
|
eventWatch, err := c.client.Core().Events(namespace).Watch(api.ListOptions{
|
||||||
|
FieldSelector: eventSelector,
|
||||||
|
Watch: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
podWatch.Stop()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
eventCh := make(chan watch.Event, 0)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer eventWatch.Stop()
|
||||||
|
defer podWatch.Stop()
|
||||||
|
defer close(eventCh)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case _ = <-stopChannel:
|
||||||
|
return
|
||||||
|
|
||||||
|
case podEvent, ok := <-podWatch.ResultChan():
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
eventCh <- podEvent
|
||||||
|
|
||||||
|
case eventEvent, ok := <-eventWatch.ResultChan():
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
eventCh <- eventEvent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return eventCh, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
|
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
|
||||||
|
|
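The rewritten WatchPod fans a pod watch and an event watch into one channel and tears both down when either source closes or the stop channel fires. A dependency-free sketch of the same fan-in shape; mergeEvents and the string events are hypothetical:

```go
package main

import "fmt"

// mergeEvents fans two event streams into one channel, closing the
// output when either source closes or stop fires; this is the same
// select-loop shape the new WatchPod uses, minus the k8s types.
func mergeEvents(a, b <-chan string, stop <-chan struct{}) <-chan string {
	out := make(chan string)
	go func() {
		defer close(out)
		for {
			select {
			case <-stop:
				return
			case ev, ok := <-a:
				if !ok {
					return
				}
				out <- ev
			case ev, ok := <-b:
				if !ok {
					return
				}
				out <- ev
			}
		}
	}()
	return out
}

func main() {
	pods := make(chan string, 1)
	events := make(chan string, 1)
	stop := make(chan struct{})
	pods <- "pod: Succeeded"
	events <- "event: recycler pod started"

	out := mergeEvents(pods, events, stop)
	fmt.Println(<-out)
	fmt.Println(<-out)
	close(stop) // stops the fan-in goroutine, which closes out
}
```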
vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go (generated, vendored)
@@ -19,6 +19,7 @@ package autoprovision
 import (
 	"io"
 
+	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 
 	"fmt"
@@ -26,8 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 )
 
 func init() {
@@ -42,7 +42,7 @@ func init() {
 type provision struct {
 	*admission.Handler
 	client            clientset.Interface
-	namespaceInformer framework.SharedIndexInformer
+	namespaceInformer cache.SharedIndexInformer
 }
 
 var _ = admission.WantsInformerFactory(&provision{})
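The change repeated across these admission plugins is mechanical: informer types moved from pkg/controller/framework into pkg/client/cache, and the shared factory from pkg/controller/framework/informers to pkg/controller/informers. A sketch of a plugin skeleton against the new paths; provisionLike is hypothetical, and the SetInformerFactory body is assumed from the WantsInformerFactory assertion in the hunks:

```go
// Hypothetical plugin skeleton mirroring the hunks above; only the
// import paths and the informer field type are the point here.
package skeleton

import (
	"k8s.io/kubernetes/pkg/admission"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// provisionLike carries the same fields as the rewritten plugins: the
// informer is now a cache.SharedIndexInformer, populated from the
// shared factory that moved to pkg/controller/informers.
type provisionLike struct {
	*admission.Handler
	client            clientset.Interface
	namespaceInformer cache.SharedIndexInformer
}

// SetInformerFactory wires the shared namespace informer in (method
// shape assumed from the WantsInformerFactory interface name).
func (p *provisionLike) SetInformerFactory(f informers.SharedInformerFactory) {
	p.namespaceInformer = f.Namespaces().Informer()
}
```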
vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission.go (generated, vendored)

@@ -19,6 +19,7 @@ package exists
 import (
 	"io"
 
+	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 
 	"fmt"
@@ -26,8 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 )
 
 func init() {
@@ -42,7 +42,7 @@ func init() {
 type exists struct {
 	*admission.Handler
 	client            clientset.Interface
-	namespaceInformer framework.SharedIndexInformer
+	namespaceInformer cache.SharedIndexInformer
 }
 
 var _ = admission.WantsInformerFactory(&exists{})
vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle/admission.go (generated, vendored)

@@ -23,9 +23,9 @@ import (
 
 	lru "github.com/hashicorp/golang-lru"
 
+	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/controller/informers"
 
 	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
@@ -52,7 +52,7 @@ type lifecycle struct {
 	*admission.Handler
 	client             clientset.Interface
 	immortalNamespaces sets.String
-	namespaceInformer  framework.SharedIndexInformer
+	namespaceInformer  cache.SharedIndexInformer
 	// forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache.
 	// if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server.
 	forceLiveLookupCache *lru.Cache
@@ -18,6 +18,7 @@ package predicates
 
 import (
 	"fmt"
 
 	"k8s.io/kubernetes/pkg/api"
 )
vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go (generated, vendored)
@@ -968,7 +968,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods
 	if err != nil {
 		return nil, err
 	}
-	if affinity.PodAntiAffinity != nil {
+	if affinity != nil && affinity.PodAntiAffinity != nil {
 		existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
 		if err != nil {
 			return nil, err
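The one-line predicates fix guards against pods that carry no affinity at all: if affinity is nil, dereferencing affinity.PodAntiAffinity panics the scheduler. A standalone sketch of the short-circuit, with stand-in types rather than the real api structs:

```go
package main

import "fmt"

// Stand-ins for the real api structs; with a nil *Affinity, reading
// affinity.PodAntiAffinity directly would panic, so the guard must
// check affinity != nil first and rely on && short-circuiting.
type PodAntiAffinity struct{}

type Affinity struct {
	PodAntiAffinity *PodAntiAffinity
}

func hasAntiAffinity(affinity *Affinity) bool {
	return affinity != nil && affinity.PodAntiAffinity != nil
}

func main() {
	fmt.Println(hasAntiAffinity(nil))         // false, and no panic
	fmt.Println(hasAntiAffinity(&Affinity{})) // false
	fmt.Println(hasAntiAffinity(&Affinity{PodAntiAffinity: &PodAntiAffinity{}})) // true
}
```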
vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/factory.go (generated, vendored)

@@ -29,7 +29,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/runtime"
@@ -77,8 +76,8 @@ type ConfigFactory struct {
 	// Close this to stop all reflectors
 	StopEverything chan struct{}
 
-	scheduledPodPopulator *framework.Controller
-	nodePopulator         *framework.Controller
+	scheduledPodPopulator *cache.Controller
+	nodePopulator         *cache.Controller
 
 	schedulerCache schedulercache.Cache
 
@@ -125,11 +124,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
 	// We construct this here instead of in CreateFromKeys because
 	// ScheduledPodLister is something we provide to plug in functions that
 	// they may need to call.
-	c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer(
+	c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer(
 		c.createAssignedNonTerminatedPodLW(),
 		&api.Pod{},
 		0,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    c.addPodToCache,
 			UpdateFunc: c.updatePodInCache,
 			DeleteFunc: c.deletePodFromCache,
@@ -137,11 +136,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)
 
-	c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
+	c.NodeLister.Store, c.nodePopulator = cache.NewInformer(
 		c.createNodeLW(),
 		&api.Node{},
 		0,
-		framework.ResourceEventHandlerFuncs{
+		cache.ResourceEventHandlerFuncs{
 			AddFunc:    c.addNodeToCache,
 			UpdateFunc: c.updateNodeInCache,
 			DeleteFunc: c.deleteNodeFromCache,
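As with the admission plugins, the scheduler factory changes are pure renames: framework.NewInformer, framework.NewIndexerInformer, and framework.ResourceEventHandlerFuncs now live in the cache package with the same signatures. A minimal sketch of the renamed constructor, assuming the 1.4 vendored import paths; watchNodes and its no-op handlers are hypothetical:

```go
package sketch

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// watchNodes shows the post-rename spelling of the informer setup used
// above: cache.NewInformer replaces framework.NewInformer unchanged.
// lw would typically be built with cache.NewListWatchFromClient.
func watchNodes(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.Store {
	store, controller := cache.NewInformer(
		lw,
		&api.Node{},
		0, // no periodic resync
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* node added */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* node updated */ },
			DeleteFunc: func(obj interface{}) { /* node removed */ },
		},
	)
	go controller.Run(stopCh)
	return store
}
```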
vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go (generated, vendored)

@@ -244,12 +244,12 @@ func (cache *schedulerCache) RemovePod(pod *api.Pod) error {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
 
-	_, ok := cache.podStates[key]
+	cachedstate, ok := cache.podStates[key]
 	switch {
 	// An assumed pod won't have Delete/Remove event. It needs to have Add event
 	// before Remove event, in which case the state would change from Assumed to Added.
 	case ok && !cache.assumedPods[key]:
-		err := cache.removePod(pod)
+		err := cache.removePod(cachedstate.pod)
 		if err != nil {
 			return err
 		}
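The RemovePod fix removes the pod object that was actually recorded in the cache rather than the object carried by the delete event; the cached copy holds the node the pod was counted against (for example an assumed NodeName), so removing by the event's copy can decrement the wrong node's aggregates. A toy model of that failure mode; every type here is a stand-in:

```go
package main

import "fmt"

// podState's pod field stands in for *api.Pod; here it is just the
// node name the pod was counted against when it entered the cache.
type podState struct {
	pod string
}

type schedulerCache struct {
	podStates map[string]podState
	perNode   map[string]int
}

// RemovePod decrements the node recorded in the cached state, as the
// fixed code does; using eventNode instead would corrupt the counts
// whenever the delete event carries a different (or empty) node.
func (c *schedulerCache) RemovePod(key, eventNode string) {
	cached, ok := c.podStates[key]
	if !ok {
		return
	}
	c.perNode[cached.pod]-- // correct: the node it was counted on
	delete(c.podStates, key)
}

func main() {
	c := &schedulerCache{
		podStates: map[string]podState{"ns/p": {pod: "node-a"}},
		perNode:   map[string]int{"node-a": 1},
	}
	c.RemovePod("ns/p", "") // delete event may carry no NodeName
	fmt.Println(c.perNode["node-a"]) // 0
}
```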