Upgrade to Kubernetes 1.4.1
parent e3a05b0656
commit 2085b894be
@@ -45,7 +45,6 @@ import (
"k8s.io/kubernetes/pkg/client/restclient"
kclient "k8s.io/kubernetes/pkg/client/unversioned"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kframework "k8s.io/kubernetes/pkg/controller/framework"
kselector "k8s.io/kubernetes/pkg/fields"
etcdutil "k8s.io/kubernetes/pkg/storage/etcd/util"
"k8s.io/kubernetes/pkg/util/validation"
@@ -556,11 +555,11 @@ func newKubeClient() (*kclient.Client, error) {
}

func watchForServices(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
serviceStore, serviceController := kframework.NewInformer(
serviceStore, serviceController := kcache.NewInformer(
createServiceLW(kubeClient),
&kapi.Service{},
resyncPeriod,
kframework.ResourceEventHandlerFuncs{
kcache.ResourceEventHandlerFuncs{
AddFunc: ks.newService,
DeleteFunc: ks.removeService,
UpdateFunc: ks.updateService,
@@ -571,11 +570,11 @@ func watchForServices(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
}

func watchEndpoints(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
eStore, eController := kframework.NewInformer(
eStore, eController := kcache.NewInformer(
createEndpointsLW(kubeClient),
&kapi.Endpoints{},
resyncPeriod,
kframework.ResourceEventHandlerFuncs{
kcache.ResourceEventHandlerFuncs{
AddFunc: ks.handleEndpointAdd,
UpdateFunc: func(oldObj, newObj interface{}) {
// TODO: Avoid unwanted updates.
@@ -589,11 +588,11 @@ func watchEndpoints(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
}

func watchPods(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
eStore, eController := kframework.NewInformer(
eStore, eController := kcache.NewInformer(
createEndpointsPodLW(kubeClient),
&kapi.Pod{},
resyncPeriod,
kframework.ResourceEventHandlerFuncs{
kcache.ResourceEventHandlerFuncs{
AddFunc: ks.handlePodCreate,
UpdateFunc: func(oldObj, newObj interface{}) {
ks.handlePodUpdate(oldObj, newObj)
@@ -1,13 +1,11 @@
language: go
go:
- 1.4.3
- 1.5.4
- 1.6.3
- tip

matrix:
include:
- go: 1.4.3
env: NOVET=true # No bundled vet.
- go: 1.5.4
- go: 1.6.3
- go: 1.7
- go: tip
allow_failures:
- go: tip

@@ -18,7 +16,3 @@ before_install:
script:
- PATH=$PATH:$PWD/bin go test -v ./...
- go build
- diff -u <(echo -n) <(gofmt -d -s .)
- if [ -z $NOVET ]; then
diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
fi
@@ -663,7 +663,7 @@ command.SetUsageTemplate(s string)

## PreRun or PostRun Hooks

It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order:

- `PersistentPreRun`
- `PreRun`
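For context on the README text in the hunk above, the documented hook order can be seen with a small standalone program. This is an illustrative sketch using the spf13/cobra API, not part of this commit; each hook simply prints its name so the execution order is visible (PersistentPreRun, PreRun, Run, PostRun, PersistentPostRun):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "demo",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PersistentPreRun")
		},
		PreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PreRun")
		},
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("Run")
		},
		PostRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PostRun")
		},
		PersistentPostRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("PersistentPostRun")
		},
	}
	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}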
@@ -11,7 +11,7 @@ import (
)

const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions"
BashCompCustom = "cobra_annotation_bash_completion_custom"
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
@@ -401,8 +401,10 @@ func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error {
format += "="
}
format += "\")\n"
_, err := fmt.Fprintf(w, format, name)
return err
if _, err := fmt.Fprintf(w, format, name); err != nil {
return err
}
return nil
}

func writeFlags(cmd *Command, w io.Writer) error {
@@ -419,9 +421,6 @@ func writeFlags(cmd *Command, w io.Writer) error {
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
var visitErr error
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
if err := writeFlag(flag, w); err != nil {
visitErr = err
return
@@ -443,9 +442,6 @@ func writeFlags(cmd *Command, w io.Writer) error {
return visitErr
}
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
if err := writeFlag(flag, w); err != nil {
visitErr = err
return
@@ -472,9 +468,6 @@ func writeRequiredFlag(cmd *Command, w io.Writer) error {
flags := cmd.NonInheritedFlags()
var visitErr error
flags.VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
for key := range flag.Annotations {
switch key {
case BashCompOneRequiredFlag:
@@ -581,10 +574,6 @@ func (cmd *Command) GenBashCompletion(w io.Writer) error {
return postscript(w, cmd.Name())
}

func nonCompletableFlag(flag *pflag.Flag) bool {
return flag.Hidden || len(flag.Deprecated) > 0
}

func (cmd *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
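For reference, the GenBashCompletionFile method touched in the hunk above is typically invoked like the following sketch. This is an assumption-level usage example (the command name and output path are arbitrary), not code from this commit:

package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}

	// Write a bash completion script for the command tree rooted at "demo".
	if err := root.GenBashCompletionFile("demo-completion.sh"); err != nil {
		log.Fatal(err)
	}
}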
@@ -37,36 +37,38 @@ var templateFuncs = template.FuncMap{

var initializers []func()

// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
// Set this to true to enable it.
// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
// Set this to true to enable it
var EnablePrefixMatching = false

// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
// To disable sorting, set it to false.
//EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
//To disable sorting, set it to false.
var EnableCommandSorting = true

// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
//AddTemplateFunc adds a template function that's available to Usage and Help
//template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
templateFuncs[name] = tmplFunc
}

// AddTemplateFuncs adds multiple template functions availalble to Usage and
// Help template generation.
//AddTemplateFuncs adds multiple template functions availalble to Usage and
//Help template generation.
func AddTemplateFuncs(tmplFuncs template.FuncMap) {
for k, v := range tmplFuncs {
templateFuncs[k] = v
}
}

// OnInitialize takes a series of func() arguments and appends them to a slice of func().
//OnInitialize takes a series of func() arguments and appends them to a slice of func().
func OnInitialize(y ...func()) {
initializers = append(initializers, y...)
for _, x := range y {
initializers = append(initializers, x)
}
}

// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
// ints and then compared.
//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
//ints and then compared.
func Gt(a interface{}, b interface{}) bool {
var left, right int64
av := reflect.ValueOf(a)
@@ -94,7 +96,7 @@ func Gt(a interface{}, b interface{}) bool {
return left > right
}

// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a)
bv := reflect.ValueOf(b)
@@ -114,7 +116,7 @@ func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace)
}

// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s
func appendIfNotPresent(s, stringToAppend string) string {
if strings.Contains(s, stringToAppend) {
return s
@@ -122,7 +124,7 @@ func appendIfNotPresent(s, stringToAppend string) string {
return s + " " + stringToAppend
}

// rpad adds padding to the right of a string.
//rpad adds padding to the right of a string
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
@@ -136,7 +138,7 @@ func tmpl(w io.Writer, text string, data interface{}) error {
return t.Execute(w, data)
}

// ld compares two strings and returns the levenshtein distance between them.
// ld compares two strings and returns the levenshtein distance between them
func ld(s, t string, ignoreCase bool) int {
if ignoreCase {
s = strings.ToLower(s)
@@ -140,17 +140,17 @@ func (c *Command) SetOutput(output io.Writer) {
c.output = &output
}

// Usage can be defined by application.
// Usage can be defined by application
func (c *Command) SetUsageFunc(f func(*Command) error) {
c.usageFunc = f
}

// Can be defined by Application.
// Can be defined by Application
func (c *Command) SetUsageTemplate(s string) {
c.usageTemplate = s
}

// Can be defined by Application.
// Can be defined by Application
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
c.helpFunc = f
}
@@ -159,7 +159,7 @@ func (c *Command) SetHelpCommand(cmd *Command) {
c.helpCommand = cmd
}

// Can be defined by Application.
// Can be defined by Application
func (c *Command) SetHelpTemplate(s string) {
c.helpTemplate = s
}
@@ -195,7 +195,7 @@ func (c *Command) getOut(def io.Writer) io.Writer {
}

// UsageFunc returns either the function set by SetUsageFunc for this command
// or a parent, or it returns a default usage function.
// or a parent, or it returns a default usage function
func (c *Command) UsageFunc() (f func(*Command) error) {
if c.usageFunc != nil {
return c.usageFunc
@@ -214,15 +214,15 @@ func (c *Command) UsageFunc() (f func(*Command) error) {
}
}

// Usage puts out the usage for the command.
// Used when a user provides invalid input.
// Can be defined by user by overriding UsageFunc.
// Output the usage for the command
// Used when a user provides invalid input
// Can be defined by user by overriding UsageFunc
func (c *Command) Usage() error {
return c.UsageFunc()(c)
}

// HelpFunc returns either the function set by SetHelpFunc for this command
// or a parent, or it returns a function with default help behavior.
// or a parent, or it returns a function with default help behavior
func (c *Command) HelpFunc() func(*Command, []string) {
cmd := c
for cmd != nil {
@@ -240,9 +240,9 @@ func (c *Command) HelpFunc() func(*Command, []string) {
}
}

// Help puts out the help for the command.
// Used when a user calls help [command].
// Can be defined by user by overriding HelpFunc.
// Output the help for the command
// Used when a user calls help [command]
// Can be defined by user by overriding HelpFunc
func (c *Command) Help() error {
c.HelpFunc()(c, []string{})
return nil
@@ -333,7 +333,7 @@ func (c *Command) HelpTemplate() string {
{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
}

// Really only used when casting a command to a commander.
// Really only used when casting a command to a commander
func (c *Command) resetChildrensParents() {
for _, x := range c.commands {
x.parent = c
@@ -745,13 +745,13 @@ func (c *Command) initHelpCmd() {
c.AddCommand(c.helpCommand)
}

// Used for testing.
// Used for testing
func (c *Command) ResetCommands() {
c.commands = nil
c.helpCommand = nil
}

// Sorts commands by their names.
// Sorts commands by their names
type commandSorterByName []*Command

func (c commandSorterByName) Len() int { return len(c) }
@@ -831,18 +831,18 @@ main:
}
}

// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
// Print is a convenience method to Print to the defined output, fallback to Stderr if not set
func (c *Command) Print(i ...interface{}) {
fmt.Fprint(c.OutOrStderr(), i...)
}

// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
// Println is a convenience method to Println to the defined output, fallback to Stderr if not set
func (c *Command) Println(i ...interface{}) {
str := fmt.Sprintln(i...)
c.Print(str)
}

// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set
func (c *Command) Printf(format string, i ...interface{}) {
str := fmt.Sprintf(format, i...)
c.Print(str)
@@ -859,7 +859,7 @@ func (c *Command) CommandPath() string {
return str
}

// UseLine puts out the full usage for a given command (including parents).
//The full usage for a given command (including parents)
func (c *Command) UseLine() string {
str := ""
if c.HasParent() {
@@ -869,7 +869,7 @@ func (c *Command) UseLine() string {
}

// For use in determining which flags have been assigned to which commands
// and which persist.
// and which persist
func (c *Command) DebugFlags() {
c.Println("DebugFlags called on", c.Name())
var debugflags func(*Command)
@@ -944,18 +944,18 @@ func (c *Command) HasExample() bool {
return len(c.Example) > 0
}

// Runnable determines if the command is itself runnable.
// Runnable determines if the command is itself runnable
func (c *Command) Runnable() bool {
return c.Run != nil || c.RunE != nil
}

// HasSubCommands determines if the command has children commands.
// HasSubCommands determines if the command has children commands
func (c *Command) HasSubCommands() bool {
return len(c.commands) > 0
}

// IsAvailableCommand determines if a command is available as a non-help command
// (this includes all non deprecated/hidden commands).
// (this includes all non deprecated/hidden commands)
func (c *Command) IsAvailableCommand() bool {
if len(c.Deprecated) != 0 || c.Hidden {
return false
@@ -974,7 +974,7 @@ func (c *Command) IsAvailableCommand() bool {

// IsHelpCommand determines if a command is a 'help' command; a help command is
// determined by the fact that it is NOT runnable/hidden/deprecated, and has no
// sub commands that are runnable/hidden/deprecated.
// sub commands that are runnable/hidden/deprecated
func (c *Command) IsHelpCommand() bool {

// if a command is runnable, deprecated, or hidden it is not a 'help' command
@@ -993,9 +993,9 @@ func (c *Command) IsHelpCommand() bool {
return true
}

// HasHelpSubCommands determines if a command has any available 'help' sub commands
// HasHelpSubCommands determines if a command has any avilable 'help' sub commands
// that need to be shown in the usage/help default template under 'additional help
// topics'.
// topics'
func (c *Command) HasHelpSubCommands() bool {

// return true on the first found available 'help' sub command
@@ -1010,7 +1010,7 @@ func (c *Command) HasHelpSubCommands() bool {
}

// HasAvailableSubCommands determines if a command has available sub commands that
// need to be shown in the usage/help default template under 'available commands'.
// need to be shown in the usage/help default template under 'available commands'
func (c *Command) HasAvailableSubCommands() bool {

// return true on the first found available (non deprecated/help/hidden)
@@ -1026,18 +1026,17 @@ func (c *Command) HasAvailableSubCommands() bool {
return false
}

// HasParent determines if the command is a child command.
// Determine if the command is a child command
func (c *Command) HasParent() bool {
return c.parent != nil
}

// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists.
// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists
func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
return c.globNormFunc
}

// Flage returns the complete FlagSet that applies
// to this command (local and persistent declared here and by all parents).
// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents)
func (c *Command) Flags() *flag.FlagSet {
if c.flags == nil {
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
@@ -1049,7 +1048,7 @@ func (c *Command) Flags() *flag.FlagSet {
return c.flags
}

// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands
func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
persistentFlags := c.PersistentFlags()

@@ -1062,7 +1061,7 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
return out
}

// LocalFlags returns the local FlagSet specifically set in the current command.
// Get the local FlagSet specifically set in the current command
func (c *Command) LocalFlags() *flag.FlagSet {
c.mergePersistentFlags()

@@ -1080,7 +1079,7 @@ func (c *Command) LocalFlags() *flag.FlagSet {
return local
}

// InheritedFlags returns all flags which were inherited from parents commands.
// All Flags which were inherited from parents commands
func (c *Command) InheritedFlags() *flag.FlagSet {
c.mergePersistentFlags()

@@ -1109,12 +1108,12 @@ func (c *Command) InheritedFlags() *flag.FlagSet {
return inherited
}

// NonInheritedFlags returns all flags which were not inherited from parent commands.
// All Flags which were not inherited from parent commands
func (c *Command) NonInheritedFlags() *flag.FlagSet {
return c.LocalFlags()
}

// PersistentFlags returns the persistent FlagSet specifically set in the current command.
// Get the Persistent FlagSet specifically set in the current command
func (c *Command) PersistentFlags() *flag.FlagSet {
if c.pflags == nil {
c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
@@ -1126,7 +1125,7 @@ func (c *Command) PersistentFlags() *flag.FlagSet {
return c.pflags
}

// ResetFlags is used in testing.
// For use in testing
func (c *Command) ResetFlags() {
c.flagErrorBuf = new(bytes.Buffer)
c.flagErrorBuf.Reset()
@@ -1136,50 +1135,50 @@ func (c *Command) ResetFlags() {
c.pflags.SetOutput(c.flagErrorBuf)
}

// Does the command contain any flags (local plus persistent from the entire structure).
// Does the command contain any flags (local plus persistent from the entire structure)
func (c *Command) HasFlags() bool {
return c.Flags().HasFlags()
}

// Does the command contain persistent flags.
// Does the command contain persistent flags
func (c *Command) HasPersistentFlags() bool {
return c.PersistentFlags().HasFlags()
}

// Does the command has flags specifically declared locally.
// Does the command has flags specifically declared locally
func (c *Command) HasLocalFlags() bool {
return c.LocalFlags().HasFlags()
}

// Does the command have flags inherited from its parent command.
// Does the command have flags inherited from its parent command
func (c *Command) HasInheritedFlags() bool {
return c.InheritedFlags().HasFlags()
}

// Does the command contain any flags (local plus persistent from the entire
// structure) which are not hidden or deprecated.
// structure) which are not hidden or deprecated
func (c *Command) HasAvailableFlags() bool {
return c.Flags().HasAvailableFlags()
}

// Does the command contain persistent flags which are not hidden or deprecated.
// Does the command contain persistent flags which are not hidden or deprecated
func (c *Command) HasAvailablePersistentFlags() bool {
return c.PersistentFlags().HasAvailableFlags()
}

// Does the command has flags specifically declared locally which are not hidden
// or deprecated.
// or deprecated
func (c *Command) HasAvailableLocalFlags() bool {
return c.LocalFlags().HasAvailableFlags()
}

// Does the command have flags inherited from its parent command which are
// not hidden or deprecated.
// not hidden or deprecated
func (c *Command) HasAvailableInheritedFlags() bool {
return c.InheritedFlags().HasAvailableFlags()
}

// Flag climbs up the command tree looking for matching flag.
// Flag climbs up the command tree looking for matching flag
func (c *Command) Flag(name string) (flag *flag.Flag) {
flag = c.Flags().Lookup(name)

@@ -1190,7 +1189,7 @@ func (c *Command) Flag(name string) (flag *flag.Flag) {
return
}

// Recursively find matching persistent flag.
// recursively find matching persistent flag
func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
if c.HasPersistentFlags() {
flag = c.PersistentFlags().Lookup(name)
@@ -1202,7 +1201,7 @@ func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
return
}

// ParseFlags parses persistent flag tree and local flags.
// ParseFlags parses persistent flag tree & local flags
func (c *Command) ParseFlags(args []string) (err error) {
if c.DisableFlagParsing {
return nil
@@ -1212,7 +1211,7 @@ func (c *Command) ParseFlags(args []string) (err error) {
return
}

// Parent returns a commands parent command.
// Parent returns a commands parent command
func (c *Command) Parent() *Command {
return c.parent
}
@@ -66,7 +66,7 @@ func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
separator = opts.CommandSeparator
}
basename := strings.Replace(cmd.CommandPath(), " ", separator, -1)
filename := filepath.Join(opts.Path, basename+"."+section)
filename := filepath.Join(opts.Path, basename + "." + section)
f, err := os.Create(filename)
if err != nil {
return err
@@ -5,12 +5,8 @@ language: go
go:
- 1.5.4
- 1.6.3
- 1.7
- tip

matrix:
allow_failures:
- go: tip
install:
- go get github.com/golang/lint/golint
- export PATH=$GOPATH/bin:$PATH
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// optional interface to indicate boolean flags that can be
// supplied without "=value" text
@@ -27,7 +30,7 @@ func (b *boolValue) Type() string {
return "bool"
}

func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }

func (b *boolValue) IsBoolFlag() bool { return true }
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- count Value
type countValue int
@@ -25,7 +28,7 @@ func (i *countValue) Type() string {
return "count"
}

func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
func (i *countValue) String() string { return fmt.Sprintf("%v", *i) }

func countConv(sval string) (interface{}, error) {
i, err := strconv.Atoi(sval)
@@ -419,36 +419,20 @@ func (f *FlagSet) PrintDefaults() {
fmt.Fprintf(f.out(), "%s", usages)
}

// defaultIsZeroValue returns true if the default value for this flag represents
// a zero value.
func (f *Flag) defaultIsZeroValue() bool {
switch f.Value.(type) {
case boolFlag:
return f.DefValue == "false"
case *durationValue:
// Beginning in Go 1.7, duration zero values are "0s"
return f.DefValue == "0" || f.DefValue == "0s"
case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
return f.DefValue == "0"
case *stringValue:
return f.DefValue == ""
case *ipValue, *ipMaskValue, *ipNetValue:
return f.DefValue == "<nil>"
case *intSliceValue, *stringSliceValue, *stringArrayValue:
return f.DefValue == "[]"
default:
switch f.Value.String() {
case "false":
return true
case "<nil>":
return true
case "":
return true
case "0":
return true
}
return false
// isZeroValue guesses whether the string represents the zero
// value for a flag. It is not accurate but in practice works OK.
func isZeroValue(value string) bool {
switch value {
case "false":
return true
case "<nil>":
return true
case "":
return true
case "0":
return true
}
return false
}

// UnquoteUsage extracts a back-quoted name from the usage
@@ -471,19 +455,22 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
break // Only one back quote; use type name.
}
}

name = flag.Value.Type()
switch name {
case "bool":
// No explicit name, so use type if we can find one.
name = "value"
switch flag.Value.(type) {
case boolFlag:
name = ""
case "float64":
case *durationValue:
name = "duration"
case *float64Value:
name = "float"
case "int64":
case *intValue, *int64Value:
name = "int"
case "uint64":
case *stringValue:
name = "string"
case *uintValue, *uint64Value:
name = "uint"
}

return
}

@@ -532,7 +519,7 @@ func (f *FlagSet) FlagUsages() string {
}

line += usage
if !flag.defaultIsZeroValue() {
if !isZeroValue(flag.DefValue) {
if flag.Value.Type() == "string" {
line += fmt.Sprintf(" (default %q)", flag.DefValue)
} else {
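For illustration, the string-based heuristic shown in the hunks above can be exercised on its own. This is a standalone sketch (not part of the vendored pflag code) showing how such a check decides whether a "(default ...)" suffix is appended to a flag's usage line:

package main

import "fmt"

// isZeroValue mirrors the string-based heuristic in the hunk above: a default
// is hidden from help output when its textual form looks like a zero value.
func isZeroValue(value string) bool {
	switch value {
	case "false", "<nil>", "", "0":
		return true
	}
	return false
}

func main() {
	// A flag whose default is "0" gets no "(default ...)" suffix; "8080" does.
	for _, def := range []string{"0", "8080"} {
		line := "--port int"
		if !isZeroValue(def) {
			line += fmt.Sprintf(" (default %q)", def)
		}
		fmt.Println(line)
	}
}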
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- float32 Value
type float32Value float32
@@ -20,7 +23,7 @@ func (f *float32Value) Type() string {
return "float32"
}

func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }

func float32Conv(sval string) (interface{}, error) {
v, err := strconv.ParseFloat(sval, 32)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- float64 Value
type float64Value float64
@@ -20,7 +23,7 @@ func (f *float64Value) Type() string {
return "float64"
}

func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }

func float64Conv(sval string) (interface{}, error) {
return strconv.ParseFloat(sval, 64)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- int Value
type intValue int
@@ -20,7 +23,7 @@ func (i *intValue) Type() string {
return "int"
}

func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }

func intConv(sval string) (interface{}, error) {
return strconv.Atoi(sval)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- int32 Value
type int32Value int32
@@ -20,7 +23,7 @@ func (i *int32Value) Type() string {
return "int32"
}

func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }

func int32Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 32)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- int64 Value
type int64Value int64
@@ -20,7 +23,7 @@ func (i *int64Value) Type() string {
return "int64"
}

func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }

func int64Conv(sval string) (interface{}, error) {
return strconv.ParseInt(sval, 0, 64)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- int8 Value
type int8Value int8
@@ -20,7 +23,7 @@ func (i *int8Value) Type() string {
return "int8"
}

func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) }

func int8Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 8)
@@ -1,5 +1,7 @@
package pflag

import "fmt"

// -- string Value
type stringValue string

@@ -16,7 +18,7 @@ func (s *stringValue) Type() string {
return "string"
}

func (s *stringValue) String() string { return string(*s) }
func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }

func stringConv(sval string) (interface{}, error) {
return sval, nil
@@ -1,110 +0,0 @@
package pflag

import (
"fmt"
"strings"
)

var _ = fmt.Fprint

// -- stringArray Value
type stringArrayValue struct {
value *[]string
changed bool
}

func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
ssv := new(stringArrayValue)
ssv.value = p
*ssv.value = val
return ssv
}

func (s *stringArrayValue) Set(val string) error {
if !s.changed {
*s.value = []string{val}
s.changed = true
} else {
*s.value = append(*s.value, val)
}
return nil
}

func (s *stringArrayValue) Type() string {
return "stringArray"
}

func (s *stringArrayValue) String() string {
str, _ := writeAsCSV(*s.value)
return "[" + str + "]"
}

func stringArrayConv(sval string) (interface{}, error) {
sval = strings.Trim(sval, "[]")
// An empty string would cause a array with one (empty) string
if len(sval) == 0 {
return []string{}, nil
}
return readAsCSV(sval)
}

// GetStringArray return the []string value of a flag with the given name
func (f *FlagSet) GetStringArray(name string) ([]string, error) {
val, err := f.getFlagType(name, "stringArray", stringArrayConv)
if err != nil {
return []string{}, err
}
return val.([]string), nil
}

// StringArrayVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the values of the multiple flags.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
f.VarP(newStringArrayValue(value, p), name, "", usage)
}

// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
}

// StringArrayVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
// The value of each argument will not try to be separated by comma
func StringArrayVar(p *[]string, name string, value []string, usage string) {
CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
}

// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
}

// StringArray defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
p := []string{}
f.StringArrayVarP(&p, name, "", value, usage)
return &p
}

// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
p := []string{}
f.StringArrayVarP(&p, name, shorthand, value, usage)
return &p
}

// StringArray defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
func StringArray(name string, value []string, usage string) *[]string {
return CommandLine.StringArrayP(name, "", value, usage)
}

// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
return CommandLine.StringArrayP(name, shorthand, value, usage)
}
@@ -1,7 +1,6 @@
package pflag

import (
"bytes"
"encoding/csv"
"fmt"
"strings"
@@ -22,28 +21,10 @@ func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
return ssv
}

func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
func (s *stringSliceValue) Set(val string) error {
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}

func writeAsCSV(vals []string) (string, error) {
b := &bytes.Buffer{}
w := csv.NewWriter(b)
err := w.Write(vals)
if err != nil {
return "", err
}
w.Flush()
return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil
}

func (s *stringSliceValue) Set(val string) error {
v, err := readAsCSV(val)
v, err := csvReader.Read()
if err != nil {
return err
}
@@ -60,10 +41,7 @@ func (s *stringSliceValue) Type() string {
return "stringSlice"
}

func (s *stringSliceValue) String() string {
str, _ := writeAsCSV(*s.value)
return "[" + str + "]"
}
func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" }

func stringSliceConv(sval string) (interface{}, error) {
sval = strings.Trim(sval, "[]")
@@ -71,7 +49,8 @@ func stringSliceConv(sval string) (interface{}, error) {
if len(sval) == 0 {
return []string{}, nil
}
return readAsCSV(sval)
v := strings.Split(sval, ",")
return v, nil
}

// GetStringSlice return the []string value of a flag with the given name
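The hunks above swap between CSV-aware parsing (the readAsCSV/writeAsCSV helpers built on encoding/csv) and a plain strings.Split/strings.Join. The observable difference is how quoted elements are handled; the following standalone sketch (not part of the vendored code) shows it:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	val := `a,"b,c",d`

	// CSV-aware parsing: the quotes keep "b,c" as a single element.
	r := csv.NewReader(strings.NewReader(val))
	record, err := r.Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(record) // [a b,c d]

	// Plain comma split: the quoted element is broken apart.
	fmt.Println(strings.Split(val, ",")) // [a "b c" d]
}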
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- uint Value
type uintValue uint
@@ -20,7 +23,7 @@ func (i *uintValue) Type() string {
return "uint"
}

func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }

func uintConv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 0)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- uint16 value
type uint16Value uint16
@@ -9,7 +12,7 @@ func newUint16Value(val uint16, p *uint16) *uint16Value {
*p = val
return (*uint16Value)(p)
}

func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
func (i *uint16Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 16)
*i = uint16Value(v)
@@ -20,8 +23,6 @@ func (i *uint16Value) Type() string {
return "uint16"
}

func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }

func uint16Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 16)
if err != nil {
@@ -1,15 +1,18 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- uint32 value
// -- uint16 value
type uint32Value uint32

func newUint32Value(val uint32, p *uint32) *uint32Value {
*p = val
return (*uint32Value)(p)
}

func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
func (i *uint32Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 32)
*i = uint32Value(v)
@@ -20,8 +23,6 @@ func (i *uint32Value) Type() string {
return "uint32"
}

func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }

func uint32Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 32)
if err != nil {
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- uint64 Value
type uint64Value uint64
@@ -20,7 +23,7 @@ func (i *uint64Value) Type() string {
return "uint64"
}

func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }

func uint64Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 64)
@@ -1,6 +1,9 @@
package pflag

import "strconv"
import (
"fmt"
"strconv"
)

// -- uint8 Value
type uint8Value uint8
@@ -20,7 +23,7 @@ func (i *uint8Value) Type() string {
return "uint8"
}

func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) }

func uint8Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 8)
@@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/apiserver/authenticator"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/genericapiserver"
"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
@@ -140,6 +140,28 @@ func Run(s *options.APIServer) error {
glog.Fatalf("Failed to start kubelet client: %v", err)
}

if s.StorageConfig.DeserializationCacheSize == 0 {
// When size of cache is not explicitly set, estimate its size based on
// target memory usage.
glog.V(2).Infof("Initalizing deserialization cache size based on %dMB limit", s.TargetRAMMB)

// This is the heuristics that from memory capacity is trying to infer
// the maximum number of nodes in the cluster and set cache sizes based
// on that value.
// From our documentation, we officially recomment 120GB machines for
// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
// capacity per node.
// TODO: We may consider deciding that some percentage of memory will
// be used for the deserialization cache and divide it by the max object
// size to compute its size. We may even go further and measure
// collective sizes of the objects in the cache.
clusterSize := s.TargetRAMMB / 60
s.StorageConfig.DeserializationCacheSize = 25 * clusterSize
if s.StorageConfig.DeserializationCacheSize < 1000 {
s.StorageConfig.DeserializationCacheSize = 1000
}
}

storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
if err != nil {
glog.Fatalf("error generating storage version map: %s", err)
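The sizing heuristic introduced in the hunk above can be read as a small pure function. The following standalone sketch reuses the same constants from the diff (~60MB per node, 25 cache entries per node, floor of 1000); the function and example inputs are illustrative, not part of the commit:

package main

import "fmt"

// cacheSizeForRAM reproduces the heuristic above: infer a node count from the
// memory target (~60MB per node), allow 25 cache entries per node, never go
// below 1000.
func cacheSizeForRAM(targetRAMMB int) int {
	clusterSize := targetRAMMB / 60
	size := 25 * clusterSize
	if size < 1000 {
		size = 1000
	}
	return size
}

func main() {
	for _, mb := range []int{1024, 8192, 120000} {
		fmt.Printf("TargetRAMMB=%d -> DeserializationCacheSize=%d\n", mb, cacheSizeForRAM(mb))
	}
}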
@@ -51,9 +51,9 @@ import (
"k8s.io/kubernetes/pkg/controller/deployment"
"k8s.io/kubernetes/pkg/controller/disruption"
endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/job"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
nodecontroller "k8s.io/kubernetes/pkg/controller/node"
@@ -85,9 +85,12 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
fs.Int32Var(&s.ConntrackMax, "conntrack-max", s.ConntrackMax,
"Maximum number of NAT connections to track (0 to leave as-is).")
"Maximum number of NAT connections to track (0 to leave as-is). This overrides conntrack-max-per-core and conntrack-min.")
fs.MarkDeprecated("conntrack-max", "This feature will be removed in a later release.")
fs.Int32Var(&s.ConntrackMaxPerCore, "conntrack-max-per-core", s.ConntrackMaxPerCore,
"Maximum number of NAT connections to track per CPU core (0 to leave as-is). This is only considered if conntrack-max is 0.")
"Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).")
fs.Int32Var(&s.ConntrackMin, "conntrack-min", s.ConntrackMin,
"Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
config.DefaultFeatureGate.AddFlag(fs)
}
@@ -335,13 +335,22 @@ func (s *ProxyServer) Run() error {
}

func getConntrackMax(config *options.ProxyServerConfig) (int, error) {
if config.ConntrackMax > 0 && config.ConntrackMaxPerCore > 0 {
return -1, fmt.Errorf("invalid config: ConntrackMax and ConntrackMaxPerCore are mutually exclusive")
}
if config.ConntrackMax > 0 {
if config.ConntrackMaxPerCore > 0 {
return -1, fmt.Errorf("invalid config: ConntrackMax and ConntrackMaxPerCore are mutually exclusive")
}
glog.V(3).Infof("getConntrackMax: using absolute conntrax-max (deprecated)")
return int(config.ConntrackMax), nil
} else if config.ConntrackMaxPerCore > 0 {
return (int(config.ConntrackMaxPerCore) * runtime.NumCPU()), nil
}
if config.ConntrackMaxPerCore > 0 {
floor := int(config.ConntrackMin)
scaled := int(config.ConntrackMaxPerCore) * runtime.NumCPU()
if scaled > floor {
glog.V(3).Infof("getConntrackMax: using scaled conntrax-max-per-core")
return scaled, nil
}
glog.V(3).Infof("getConntrackMax: using conntrax-min")
return floor, nil
}
return 0, nil
}
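The precedence logic in the new getConntrackMax above (absolute conntrack-max wins, otherwise the per-core value scaled by CPU count but never below conntrack-min, otherwise 0 meaning "leave as-is") can be exercised with the following standalone sketch. The local conntrackConfig struct is a stand-in for options.ProxyServerConfig, introduced only for this example:

package main

import (
	"fmt"
	"runtime"
)

// conntrackConfig mirrors the three fields the proxy server consults.
type conntrackConfig struct {
	Max        int32 // --conntrack-max (deprecated, overrides the others)
	MaxPerCore int32 // --conntrack-max-per-core
	Min        int32 // --conntrack-min
}

func getConntrackMax(c conntrackConfig) (int, error) {
	if c.Max > 0 {
		if c.MaxPerCore > 0 {
			return -1, fmt.Errorf("invalid config: ConntrackMax and ConntrackMaxPerCore are mutually exclusive")
		}
		return int(c.Max), nil // absolute value wins
	}
	if c.MaxPerCore > 0 {
		floor := int(c.Min)
		scaled := int(c.MaxPerCore) * runtime.NumCPU()
		if scaled > floor {
			return scaled, nil // per-core value scaled by CPU count
		}
		return floor, nil // conntrack-min acts as a lower bound
	}
	return 0, nil // 0 means "leave the kernel setting as-is"
}

func main() {
	max, _ := getConntrackMax(conntrackConfig{MaxPerCore: 32768, Min: 131072})
	fmt.Println("conntrack max:", max)
}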
@@ -17,7 +17,7 @@ limitations under the License.
package admission

import (
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)

// PluginInitializer is used for initialization of shareable resources between admission plugins.
@@ -17,7 +17,7 @@ limitations under the License.
package admission

import (
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)

// Validator holds Validate functions, which are responsible for validation of initialized shared resources
@@ -2568,6 +2568,9 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...)
}

// TODO(freehan): allow user to update loadbalancerSourceRanges
allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)

allErrs = append(allErrs, ValidateService(service)...)
return allErrs
}
@@ -66,12 +66,14 @@ type KubeProxyConfiguration struct {
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
// conntrackMax is the maximum number of NAT connections to track (0 to
// leave as-is). This takes precedence over conntrackMaxPerCore.
// leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin.
ConntrackMax int32 `json:"conntrackMax"`
// conntrackMaxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave as-is). This value is only considered if
// conntrackMax == 0.
// per CPU core (0 to leave the limit as-is and ignore conntrackMin).
ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"`
// conntrackMin is the minimum value of connect-tracking records to allocate,
// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
ConntrackMin int32 `json:"conntrackMin"`
// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '250ms', '2s'). Must be greater than 0.
ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
@@ -89,6 +89,9 @@ func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
if obj.ConntrackMaxPerCore == 0 {
obj.ConntrackMaxPerCore = 32 * 1024
}
if obj.ConntrackMin == 0 {
obj.ConntrackMin = 128 * 1024
}
}
if obj.IPTablesMasqueradeBit == nil {
temp := int32(14)
@@ -63,12 +63,14 @@ type KubeProxyConfiguration struct {
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
// conntrackMax is the maximum number of NAT connections to track (0 to
// leave as-is). This takes precedence over conntrackMaxPerCore.
// leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin.
ConntrackMax int32 `json:"conntrackMax"`
// conntrackMaxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave as-is). This value is only considered if
// conntrackMax == 0.
// per CPU core (0 to leave the limit as-is and ignore conntrackMin).
ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"`
// conntrackMin is the minimum value of connect-tracking records to allocate,
// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
ConntrackMin int32 `json:"conntrackMin"`
// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '250ms', '2s'). Must be greater than 0.
ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
@@ -69,6 +69,7 @@ func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyCon
out.UDPIdleTimeout = in.UDPIdleTimeout
out.ConntrackMax = in.ConntrackMax
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
return nil
}
@@ -98,6 +99,7 @@ func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyCon
out.UDPIdleTimeout = in.UDPIdleTimeout
out.ConntrackMax = in.ConntrackMax
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
return nil
}
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go (generated, vendored)
@@ -75,6 +75,7 @@ func DeepCopy_v1alpha1_KubeProxyConfiguration(in interface{}, out interface{}, c
out.UDPIdleTimeout = in.UDPIdleTimeout
out.ConntrackMax = in.ConntrackMax
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
return nil
}
@@ -162,6 +162,7 @@ func DeepCopy_componentconfig_KubeProxyConfiguration(in interface{}, out interfa
out.UDPIdleTimeout = in.UDPIdleTimeout
out.ConntrackMax = in.ConntrackMax
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
return nil
}
@@ -24,6 +24,7 @@ import (
"net/http"
"time"

"github.com/golang/glog"
"github.com/pborman/uuid"

"k8s.io/kubernetes/pkg/apiserver"
@@ -39,7 +40,11 @@ type auditResponseWriter struct {
}

func (a *auditResponseWriter) WriteHeader(code int) {
fmt.Fprintf(a.out, "%s AUDIT: id=%q response=\"%d\"\n", time.Now().Format(time.RFC3339Nano), a.id, code)
line := fmt.Sprintf("%s AUDIT: id=%q response=\"%d\"\n", time.Now().Format(time.RFC3339Nano), a.id, code)
if _, err := fmt.Fprint(a.out, line); err != nil {
glog.Errorf("Unable to write audit log: %s, the error is: %v", line, err)
}

a.ResponseWriter.WriteHeader(code)
}

@@ -92,8 +97,11 @@ func WithAudit(handler http.Handler, attributeGetter apiserver.RequestAttributeG
}
id := uuid.NewRandom().String()

fmt.Fprintf(out, "%s AUDIT: id=%q ip=%q method=%q user=%q as=%q namespace=%q uri=%q\n",
line := fmt.Sprintf("%s AUDIT: id=%q ip=%q method=%q user=%q as=%q namespace=%q uri=%q\n",
time.Now().Format(time.RFC3339Nano), id, utilnet.GetClientIP(req), req.Method, attribs.GetUser().GetName(), asuser, namespace, req.URL)
if _, err := fmt.Fprint(out, line); err != nil {
glog.Errorf("Unable to write audit log: %s, the error is: %v", line, err)
}
respWriter := decorateResponseWriter(w, out, id)
handler.ServeHTTP(respWriter, req)
})
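The decorated response writer in the hunks above follows a common wrapper pattern: embed the original http.ResponseWriter, log the status code alongside the request's audit id, then delegate. A minimal standalone sketch of that pattern (the id plumbing and the stdout/stderr destinations here are simplified stand-ins, not the vendored implementation):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"time"
)

// auditWriter wraps a ResponseWriter and records the response code next to
// the request's audit id before delegating.
type auditWriter struct {
	http.ResponseWriter
	id  string
	out *os.File
}

func (a *auditWriter) WriteHeader(code int) {
	line := fmt.Sprintf("%s AUDIT: id=%q response=\"%d\"\n",
		time.Now().Format(time.RFC3339Nano), a.id, code)
	if _, err := fmt.Fprint(a.out, line); err != nil {
		fmt.Fprintf(os.Stderr, "unable to write audit log: %v\n", err)
	}
	a.ResponseWriter.WriteHeader(code)
}

func main() {
	rec := httptest.NewRecorder()
	w := &auditWriter{ResponseWriter: rec, id: "example-id", out: os.Stdout}
	w.WriteHeader(http.StatusTeapot) // prints an audit line, then sets the code
	fmt.Println("recorded status:", rec.Code)
}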
@@ -43,7 +43,8 @@ func init() {

// NewRequestAuthenticator creates an http handler that tries to authenticate the given request as a user, and then
// stores any such user found onto the provided context for the request. If authentication fails or returns an error
// the failed handler is used. On success, handler is invoked to serve the request.
// the failed handler is used. On success, "Authorization" header is removed from the request and handler
// is invoked to serve the request.
func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator.Request, failed http.Handler, handler http.Handler) (http.Handler, error) {
return api.NewRequestContextFilter(
mapper,
@@ -57,6 +58,9 @@ func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator
return
}

// authorization header is not required anymore in case of a successful authentication.
req.Header.Del("Authorization")

if ctx, ok := mapper.Get(req); ok {
mapper.Update(req, api.WithUser(ctx, user))
}
@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package cache

import (
"sync"
"time"

"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"

@@ -28,13 +27,13 @@ import (

// Config contains all the settings for a Controller.
type Config struct {
// The queue for your objects; either a cache.FIFO or
// a cache.DeltaFIFO. Your Process() function should accept
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Oueue's Pop() method.
cache.Queue
Queue

// Something that can list and watch your objects.
cache.ListerWatcher
ListerWatcher

// Something that can process your objects.
Process ProcessFunc

@@ -45,7 +44,7 @@ type Config struct {
// Reprocess everything at least this often.
// Note that if it takes longer for you to clear the queue than this
// period, you will end up processing items in the order determined
// by cache.FIFO.Replace(). Currently, this is random. If this is a
// by FIFO.Replace(). Currently, this is random. If this is a
// problem, we can change that replacement policy to append new
// things to the end of the queue instead of replacing the entire
// queue.

@@ -64,7 +63,7 @@ type ProcessFunc func(obj interface{}) error
// Controller is a generic controller framework.
type Controller struct {
config Config
reflector *cache.Reflector
reflector *Reflector
reflectorMutex sync.RWMutex
}

@@ -87,7 +86,7 @@ func New(c *Config) *Controller {
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
r := cache.NewReflector(
r := NewReflector(
c.config.ListerWatcher,
c.config.ObjectType,
c.config.Queue,

@@ -110,9 +109,9 @@ func (c *Controller) HasSynced() bool {

// Requeue adds the provided object back into the queue if it does not already exist.
func (c *Controller) Requeue(obj interface{}) error {
return c.config.Queue.AddIfNotPresent(cache.Deltas{
cache.Delta{
Type: cache.Sync,
return c.config.Queue.AddIfNotPresent(Deltas{
Delta{
Type: Sync,
Object: obj,
},
})

@@ -124,7 +123,7 @@ func (c *Controller) Requeue(obj interface{}) error {
// concurrently.
func (c *Controller) processLoop() {
for {
obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process))
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
if err != nil {
if c.config.RetryOnError {
// This is the safe way to re-enqueue.

@@ -145,7 +144,7 @@ func (c *Controller) processLoop() {
// get called even if nothing changed. This is useful for periodically
// evaluating or syncing something.
// * OnDelete will get the final state of the item if it is known, otherwise
// it will get an object of type cache.DeletedFinalStateUnknown. This can
// it will get an object of type DeletedFinalStateUnknown. This can
// happen if the watch is closed and misses the delete event and we don't
// notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {

@@ -185,18 +184,18 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
}

// DeletionHandlingMetaNamespaceKeyFunc checks for
// cache.DeletedFinalStateUnknown objects before calling
// cache.MetaNamespaceKeyFunc.
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
if d, ok := obj.(DeletedFinalStateUnknown); ok {
return d.Key, nil
}
return cache.MetaNamespaceKeyFunc(obj)
return MetaNamespaceKeyFunc(obj)
}

// NewInformer returns a cache.Store and a controller for populating the store
// NewInformer returns a Store and a controller for populating the store
// while also providing event notifications. You should only used the returned
// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:

@@ -210,18 +209,18 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
// * h is the object you want notifications sent to.
//
func NewInformer(
lw cache.ListerWatcher,
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
) (cache.Store, *Controller) {
) (Store, *Controller) {
// This will hold the client state, as we know it.
clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)

cfg := &Config{
Queue: fifo,

@@ -232,9 +231,9 @@ func NewInformer(

Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(cache.Deltas) {
for _, d := range obj.(Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err

@@ -246,7 +245,7 @@ func NewInformer(
}
h.OnAdd(d.Object)
}
case cache.Deleted:
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}

@@ -259,9 +258,9 @@ func NewInformer(
return clientState, New(cfg)
}

// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
// NewIndexerInformer returns a Indexer and a controller for populating the index
// while also providing event notifications. You should only used the returned
// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event
// Index for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:

@@ -275,19 +274,19 @@ func NewInformer(
// * h is the object you want notifications sent to.
//
func NewIndexerInformer(
lw cache.ListerWatcher,
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
indexers cache.Indexers,
) (cache.Indexer, *Controller) {
indexers Indexers,
) (Indexer, *Controller) {
// This will hold the client state, as we know it.
clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)

cfg := &Config{
Queue: fifo,

@@ -298,9 +297,9 @@ func NewIndexerInformer(

Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(cache.Deltas) {
for _, d := range obj.(Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err

@@ -312,7 +311,7 @@ func NewIndexerInformer(
}
h.OnAdd(d.Object)
}
case cache.Deleted:
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
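For readers following the framework-to-cache move above: below is a minimal sketch (not part of the diff) of how a controller builds an informer against the renamed package. It assumes the Kubernetes 1.4 `pkg/client/cache` API exactly as shown in this file; the client wiring, handler bodies, and resync period are placeholders.

```go
package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// watchPods wires up a pod informer the way the updated controllers in this
// commit do, calling cache.NewInformer where framework.NewInformer used to be.
func watchPods(client clientset.Interface, stopCh <-chan struct{}) cache.Store {
	store, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		30*time.Second, // resync period: placeholder, pick per controller
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* handle add */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* handle update */ },
			DeleteFunc: func(obj interface{}) { /* handle delete */ },
		},
	)
	go controller.Run(stopCh)
	return store
}
```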
@@ -14,14 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package cache

import (
"fmt"
"sync"
"time"

"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)

@@ -39,7 +38,7 @@ type SharedInformer interface {
// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
// TODO we should try to remove this restriction eventually.
AddEventHandler(handler ResourceEventHandler) error
GetStore() cache.Store
GetStore() Store
// GetController gives back a synthetic interface that "votes" to start the informer
GetController() ControllerInterface
Run(stopCh <-chan struct{})

@@ -50,24 +49,24 @@ type SharedInformer interface {
type SharedIndexInformer interface {
SharedInformer
// AddIndexers add indexers to the informer before it starts.
AddIndexers(indexers cache.Indexers) error
GetIndexer() cache.Indexer
AddIndexers(indexers Indexers) error
GetIndexer() Indexer
}

// NewSharedInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{})
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}

// NewSharedIndexInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
sharedIndexInformer := &sharedIndexInformer{
processor: &sharedProcessor{},
indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
listerWatcher: lw,
objectType: objType,
fullResyncPeriod: resyncPeriod,

@@ -76,13 +75,13 @@ func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resy
}

type sharedIndexInformer struct {
indexer cache.Indexer
indexer Indexer
controller *Controller

processor *sharedProcessor

// This block is tracked to handle late initialization of the controller
listerWatcher cache.ListerWatcher
listerWatcher ListerWatcher
objectType runtime.Object
fullResyncPeriod time.Duration

@@ -129,7 +128,7 @@ type deleteNotification struct {
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()

fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)

cfg := &Config{
Queue: fifo,

@@ -180,15 +179,15 @@ func (s *sharedIndexInformer) LastSyncResourceVersion() string {
return s.controller.reflector.LastSyncResourceVersion()
}

func (s *sharedIndexInformer) GetStore() cache.Store {
func (s *sharedIndexInformer) GetStore() Store {
return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
func (s *sharedIndexInformer) GetIndexer() Indexer {
return s.indexer
}

func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
s.startedLock.Lock()
defer s.startedLock.Unlock()

@@ -240,9 +239,9 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
defer s.blockDeltas.Unlock()

// from oldest to newest
for _, d := range obj.(cache.Deltas) {
for _, d := range obj.(Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
case Sync, Added, Updated:
if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
if err := s.indexer.Update(d.Object); err != nil {
return err

@@ -254,7 +253,7 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
}
s.processor.distribute(addNotification{newObj: d.Object})
}
case cache.Deleted:
case Deleted:
if err := s.indexer.Delete(d.Object); err != nil {
return err
}
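As a companion to the SharedIndexInformer changes above, here is a small sketch (not part of the diff) of the consumer-side pattern the controllers below adopt: obtaining a shared pod informer and registering `cache.ResourceEventHandlerFuncs` on it before it runs. `informers.NewPodInformer` and the handler signatures come from later hunks in this commit; the handler bodies are placeholders.

```go
package example

import (
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// sharePodInformer registers event handlers on a shared pod informer; several
// controllers can call AddEventHandler on the same informer before it starts.
func sharePodInformer(client clientset.Interface, stopCh <-chan struct{}) cache.SharedIndexInformer {
	podInformer := informers.NewPodInformer(client, 30*time.Second) // resync period is a placeholder
	if err := podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* handle add */ },
		UpdateFunc: func(oldObj, newObj interface{}) { /* handle update */ },
		DeleteFunc: func(obj interface{}) { /* handle delete */ },
	}); err != nil {
		// AddEventHandler only fails once the informer is already running.
		panic(err)
	}
	go podInformer.Run(stopCh)
	return podInformer
}
```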
@@ -29,7 +29,6 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
utilcertificates "k8s.io/kubernetes/pkg/util/certificates"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"

@@ -47,7 +46,7 @@ type CertificateController struct {
kubeClient clientset.Interface

// CSR framework and store
csrController *framework.Controller
csrController *cache.Controller
csrStore cache.StoreToCertificateRequestLister

// To allow injection of updateCertificateRequestStatus for testing.

@@ -85,7 +84,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
}

// Manage the addition/update of certificate requests
cc.csrStore.Store, cc.csrController = framework.NewInformer(
cc.csrStore.Store, cc.csrController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)

@@ -96,7 +95,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
},
&certificates.CertificateSigningRequest{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
csr := obj.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Adding certificate request %s", csr.Name)
@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock"

@@ -54,7 +53,7 @@ const (
)

var (
KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)

type ResyncPeriodFunc func() time.Duration

@@ -220,6 +219,8 @@ type Expectations interface {

// ControlleeExpectations track controllee creates/deletes.
type ControlleeExpectations struct {
// Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
add int64
del int64
key string
@@ -23,8 +23,6 @@ import (
"sync"
"time"

"github.com/golang/glog"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"

@@ -34,8 +32,7 @@ import (
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"

@@ -45,6 +42,8 @@ import (
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"

"github.com/golang/glog"
)

const (

@@ -75,7 +74,7 @@ type DaemonSetsController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewDaemonSetsController(passing SharedInformer), this
// will be null
internalPodInformer framework.SharedInformer
internalPodInformer cache.SharedInformer

// An dsc is temporarily suspended after creating/deleting these many replicas.
// It resumes normal action after observing the watch events for them.

@@ -92,11 +91,11 @@ type DaemonSetsController struct {
// A store of nodes
nodeStore cache.StoreToNodeLister
// Watches changes to all daemon sets.
dsController *framework.Controller
dsController *cache.Controller
// Watches changes to all pods
podController framework.ControllerInterface
podController cache.ControllerInterface
// Watches changes to all nodes.
nodeController *framework.Controller
nodeController *cache.Controller
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool

@@ -107,7 +106,7 @@ type DaemonSetsController struct {
queue *workqueue.Type
}

func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
func NewDaemonSetsController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.

@@ -128,7 +127,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
queue: workqueue.NewNamed("daemonset"),
}
// Manage addition/update of daemon sets.
dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
dsc.dsStore.Store, dsc.dsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)

@@ -140,7 +139,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
&extensions.DaemonSet{},
// TODO: Can we have much longer period here?
FullDaemonSetResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*extensions.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name)

@@ -173,7 +172,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie

// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addPod,
UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod,

@@ -183,7 +182,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
dsc.podStoreSynced = podInformer.HasSynced

// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change,
dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
dsc.nodeStore.Store, dsc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dsc.kubeClient.Core().Nodes().List(options)

@@ -194,7 +193,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
},
&api.Node{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addNode,
UpdateFunc: dsc.updateNode,
},
@@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"

@@ -70,15 +69,15 @@ type DeploymentController struct {
// A store of deployments, populated by the dController
dStore cache.StoreToDeploymentLister
// Watches changes to all deployments
dController *framework.Controller
dController *cache.Controller
// A store of ReplicaSets, populated by the rsController
rsStore cache.StoreToReplicaSetLister
// Watches changes to all ReplicaSets
rsController *framework.Controller
rsController *cache.Controller
// A store of pods, populated by the podController
podStore cache.StoreToPodLister
// Watches changes to all pods
podController *framework.Controller
podController *cache.Controller

// dStoreSynced returns true if the Deployment store has been synced at least once.
// Added as a member to the struct to allow injection for testing.

@@ -110,7 +109,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
}

dc.dStore.Indexer, dc.dController = framework.NewIndexerInformer(
dc.dStore.Indexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Extensions().Deployments(api.NamespaceAll).List(options)

@@ -121,7 +120,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
&extensions.Deployment{},
FullDeploymentResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDeploymentNotification,
UpdateFunc: dc.updateDeploymentNotification,
// This will enter the sync loop and no-op, because the deployment has been deleted from the store.

@@ -130,7 +129,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)

dc.rsStore.Store, dc.rsController = framework.NewInformer(
dc.rsStore.Store, dc.rsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Extensions().ReplicaSets(api.NamespaceAll).List(options)

@@ -141,14 +140,14 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
&extensions.ReplicaSet{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addReplicaSet,
UpdateFunc: dc.updateReplicaSet,
DeleteFunc: dc.deleteReplicaSet,
},
)

dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer(
dc.podStore.Indexer, dc.podController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Core().Pods(api.NamespaceAll).List(options)

@@ -159,7 +158,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
&api.Pod{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod,
UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod,
@@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/intstr"

@@ -47,22 +46,22 @@ type DisruptionController struct {
kubeClient *client.Client

pdbStore cache.Store
pdbController *framework.Controller
pdbController *cache.Controller
pdbLister cache.StoreToPodDisruptionBudgetLister

podController framework.ControllerInterface
podController cache.ControllerInterface
podLister cache.StoreToPodLister

rcIndexer cache.Indexer
rcController *framework.Controller
rcController *cache.Controller
rcLister cache.StoreToReplicationControllerLister

rsStore cache.Store
rsController *framework.Controller
rsController *cache.Controller
rsLister cache.StoreToReplicaSetLister

dIndexer cache.Indexer
dController *framework.Controller
dController *cache.Controller
dLister cache.StoreToDeploymentLister

queue *workqueue.Type

@@ -84,7 +83,7 @@ type controllerAndScale struct {
// controllers and their scale.
type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)

func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
podController: podInformer.GetController(),

@@ -97,13 +96,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie

dc.podLister.Indexer = podInformer.GetIndexer()

podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod,
UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod,
})

dc.pdbStore, dc.pdbController = framework.NewInformer(
dc.pdbStore, dc.pdbController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)

@@ -114,7 +113,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&policy.PodDisruptionBudget{},
30*time.Second,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDb,
UpdateFunc: dc.updateDb,
DeleteFunc: dc.removeDb,

@@ -122,7 +121,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
)
dc.pdbLister.Store = dc.pdbStore

dc.rcIndexer, dc.rcController = framework.NewIndexerInformer(
dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)

@@ -133,13 +132,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&api.ReplicationController{},
30*time.Second,
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)

dc.rcLister.Indexer = dc.rcIndexer

dc.rsStore, dc.rsController = framework.NewInformer(
dc.rsStore, dc.rsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)

@@ -150,12 +149,12 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&extensions.ReplicaSet{},
30*time.Second,
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
)

dc.rsLister.Store = dc.rsStore

dc.dIndexer, dc.dController = framework.NewIndexerInformer(
dc.dIndexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)

@@ -166,7 +165,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&extensions.Deployment{},
30*time.Second,
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
@@ -34,8 +34,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"

@@ -66,11 +65,11 @@ const (
)

var (
keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)

// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
func NewEndpointController(podInformer cache.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
}

@@ -79,7 +78,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
queue: workqueue.NewNamed("endpoint"),
}

e.serviceStore.Store, e.serviceController = framework.NewInformer(
e.serviceStore.Store, e.serviceController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(api.NamespaceAll).List(options)

@@ -91,7 +90,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
&api.Service{},
// TODO: Can we have much longer period here?
FullServiceResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.enqueueService,
UpdateFunc: func(old, cur interface{}) {
e.enqueueService(cur)

@@ -100,7 +99,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
},
)

podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.addPod,
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,

@@ -133,7 +132,7 @@ type EndpointController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewEndpointController(passing SharedInformer), this
// will be null
internalPodInformer framework.SharedIndexInformer
internalPodInformer cache.SharedIndexInformer

// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much

@@ -144,8 +143,8 @@ type EndpointController struct {

// Since we join two objects, we'll watch both of them with
// controllers.
serviceController *framework.Controller
podController framework.ControllerInterface
serviceController *cache.Controller
podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool
@@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package framework implements all the grunt work involved in running a simple controller.
package framework
@@ -1,262 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"errors"
"math/rand"
"strconv"
"sync"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/watch"
)

func NewFakeControllerSource() *FakeControllerSource {
return &FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}
}

func NewFakePVControllerSource() *FakePVControllerSource {
return &FakePVControllerSource{
FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}}
}

func NewFakePVCControllerSource() *FakePVCControllerSource {
return &FakePVCControllerSource{
FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}}
}

// FakeControllerSource implements listing/watching for testing.
type FakeControllerSource struct {
lock sync.RWMutex
Items map[nnu]runtime.Object
changes []watch.Event // one change per resourceVersion
Broadcaster *watch.Broadcaster
}

type FakePVControllerSource struct {
FakeControllerSource
}

type FakePVCControllerSource struct {
FakeControllerSource
}

// namespace, name, uid to be used as a key.
type nnu struct {
namespace, name string
uid types.UID
}

// Add adds an object to the set and sends an add event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Add(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
}

// Modify updates an object in the set and sends a modified event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Modify(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
}

// Delete deletes an object from the set and sends a delete event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
}

// AddDropWatch adds an object to the set but forgets to send an add event to
// watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
}

// ModifyDropWatch updates an object in the set but forgets to send a modify
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
}

// DeleteDropWatch deletes an object from the set but forgets to send a delete
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
}

func (f *FakeControllerSource) key(accessor meta.Object) nnu {
return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
}

// Change records the given event (setting the object's resource version) and
// sends a watch event with the specified probability.
func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
f.lock.Lock()
defer f.lock.Unlock()

accessor, err := meta.Accessor(e.Object)
if err != nil {
panic(err) // this is test code only
}

resourceVersion := len(f.changes) + 1
accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
f.changes = append(f.changes, e)
key := f.key(accessor)
switch e.Type {
case watch.Added, watch.Modified:
f.Items[key] = e.Object
case watch.Deleted:
delete(f.Items, key)
}

if rand.Float64() < watchProbability {
f.Broadcaster.Action(e.Type, e.Object)
}
}

func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
list := make([]runtime.Object, 0, len(f.Items))
for _, obj := range f.Items {
// Must make a copy to allow clients to modify the object.
// Otherwise, if they make a change and write it back, they
// will inadvertently change our canonical copy (in
// addition to racing with other clients).
objCopy, err := api.Scheme.DeepCopy(obj)
if err != nil {
return nil, err
}
list = append(list, objCopy.(runtime.Object))
}
return list, nil
}

// List returns a list object, with its resource version set.
func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &api.List{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
objMeta, err := api.ListMetaFor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
return listObj, nil
}

// List returns a list object, with its resource version set.
func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.FakeControllerSource.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &api.PersistentVolumeList{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
objMeta, err := api.ListMetaFor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
return listObj, nil
}

// List returns a list object, with its resource version set.
func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.FakeControllerSource.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &api.PersistentVolumeClaimList{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
objMeta, err := api.ListMetaFor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
return listObj, nil
}

// Watch returns a watch, which will be pre-populated with all changes
// after resourceVersion.
func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) {
f.lock.RLock()
defer f.lock.RUnlock()
rc, err := strconv.Atoi(options.ResourceVersion)
if err != nil {
return nil, err
}
if rc < len(f.changes) {
changes := []watch.Event{}
for _, c := range f.changes[rc:] {
// Must make a copy to allow clients to modify the
// object. Otherwise, if they make a change and write
// it back, they will inadvertently change the our
// canonical copy (in addition to racing with other
// clients).
objCopy, err := api.Scheme.DeepCopy(c.Object)
if err != nil {
return nil, err
}
changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
}
return f.Broadcaster.WatchWithPrefix(changes), nil
} else if rc > len(f.changes) {
return nil, errors.New("resource version in the future not supported by this fake")
}
return f.Broadcaster.Watch(), nil
}

// Shutdown closes the underlying broadcaster, waiting for events to be
// delivered. It's an error to call any method after calling shutdown. This is
// enforced by Shutdown() leaving f locked.
func (f *FakeControllerSource) Shutdown() {
f.lock.Lock() // Purposely no unlock.
f.Broadcaster.Shutdown()
}
@@ -32,7 +32,6 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"

@@ -49,7 +48,7 @@ const ResourceResyncTime time.Duration = 0

type monitor struct {
store cache.Store
controller *framework.Controller
controller *cache.Controller
}

type objectReference struct {

@@ -488,11 +487,11 @@ func (gc *GarbageCollector) monitorFor(resource unversioned.GroupVersionResource
}
runtimeObject.GetObjectKind().SetGroupVersionKind(kind)
}
monitor.store, monitor.controller = framework.NewInformer(
monitor.store, monitor.controller = cache.NewInformer(
gcListWatcher(client, resource),
nil,
ResourceResyncTime,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
// add the event to the propagator's eventQueue.
AddFunc: func(obj interface{}) {
setObjectTypeMeta(obj)
@@ -21,13 +21,12 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
)

// PodInformer is type of SharedIndexInformer which watches and lists all pods.
// Interface provides constructor for informer and lister for pods
type PodInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPodLister
}

@@ -37,7 +36,7 @@ type podInformer struct {

// Informer checks whether podInformer exists in sharedInformerFactory and if not, it creates new informer of type
// podInformer and connects it to sharedInformerFactory
func (f *podInformer) Informer() framework.SharedIndexInformer {
func (f *podInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()

@@ -63,7 +62,7 @@ func (f *podInformer) Lister() *cache.StoreToPodLister {
// NamespaceInformer is type of SharedIndexInformer which watches and lists all namespaces.
// Interface provides constructor for informer and lister for namsespaces
type NamespaceInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.IndexerToNamespaceLister
}

@@ -73,7 +72,7 @@ type namespaceInformer struct {

// Informer checks whether namespaceInformer exists in sharedInformerFactory and if not, it creates new informer of type
// namespaceInformer and connects it to sharedInformerFactory
func (f *namespaceInformer) Informer() framework.SharedIndexInformer {
func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()

@@ -99,7 +98,7 @@ func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {
// NodeInformer is type of SharedIndexInformer which watches and lists all nodes.
// Interface provides constructor for informer and lister for nodes
type NodeInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToNodeLister
}

@@ -109,7 +108,7 @@ type nodeInformer struct {

// Informer checks whether nodeInformer exists in sharedInformerFactory and if not, it creates new informer of type
// nodeInformer and connects it to sharedInformerFactory
func (f *nodeInformer) Informer() framework.SharedIndexInformer {
func (f *nodeInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()

@@ -135,7 +134,7 @@ func (f *nodeInformer) Lister() *cache.StoreToNodeLister {
// PVCInformer is type of SharedIndexInformer which watches and lists all persistent volume claims.
// Interface provides constructor for informer and lister for persistent volume claims
type PVCInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPVCFetcher
}

@@ -145,7 +144,7 @@ type pvcInformer struct {

// Informer checks whether pvcInformer exists in sharedInformerFactory and if not, it creates new informer of type
// pvcInformer and connects it to sharedInformerFactory
func (f *pvcInformer) Informer() framework.SharedIndexInformer {
func (f *pvcInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()

@@ -171,7 +170,7 @@ func (f *pvcInformer) Lister() *cache.StoreToPVCFetcher {
// PVInformer is type of SharedIndexInformer which watches and lists all persistent volumes.
// Interface provides constructor for informer and lister for persistent volumes
type PVInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPVFetcher
}

@@ -181,7 +180,7 @@ type pvInformer struct {

// Informer checks whether pvInformer exists in sharedInformerFactory and if not, it creates new informer of type
// pvInformer and connects it to sharedInformerFactory
func (f *pvInformer) Informer() framework.SharedIndexInformer {
func (f *pvInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
@@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)

@@ -47,7 +46,7 @@ type sharedInformerFactory struct {
lock sync.Mutex
defaultResync time.Duration

informers map[reflect.Type]framework.SharedIndexInformer
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started
// this allows calling of Start method multiple times
startedInformers map[reflect.Type]bool

@@ -58,7 +57,7 @@ func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Dur
return &sharedInformerFactory{
client: client,
defaultResync: defaultResync,
informers: make(map[reflect.Type]framework.SharedIndexInformer),
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
}
}

@@ -102,8 +101,8 @@ func (f *sharedInformerFactory) PersistentVolumes() PVInformer {
}

// NewPodInformer returns a SharedIndexInformer that lists and watches all pods
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Pods(api.NamespaceAll).List(options)

@@ -121,8 +120,8 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) fram
}

// NewNodeInformer returns a SharedIndexInformer that lists and watches all nodes
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Nodes().List(options)

@@ -139,8 +138,8 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) fra
}

// NewPVCInformer returns a SharedIndexInformer that lists and watches all PVCs
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)

@@ -157,8 +156,8 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) fram
}

// NewPVInformer returns a SharedIndexInformer that lists and watches all PVs
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumes().List(options)

@@ -175,8 +174,8 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) frame
}

// NewNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Namespaces().List(options)
@ -31,8 +31,7 @@ import (
|
|||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/framework/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
|
@ -51,7 +50,7 @@ type JobController struct {
|
|||
// we have a personal informer, we must start it ourselves. If you start
|
||||
// the controller using NewJobController(passing SharedInformer), this
|
||||
// will be null
|
||||
internalPodInformer framework.SharedInformer
|
||||
internalPodInformer cache.SharedInformer
|
||||
|
||||
// To allow injection of updateJobStatus for testing.
|
||||
updateHandler func(job *batch.Job) error
|
||||
|
@ -66,7 +65,7 @@ type JobController struct {
|
|||
// A store of job, populated by the jobController
|
||||
jobStore cache.StoreToJobLister
|
||||
// Watches changes to all jobs
|
||||
jobController *framework.Controller
|
||||
jobController *cache.Controller
|
||||
|
||||
// A store of pods, populated by the podController
|
||||
podStore cache.StoreToPodLister
|
||||
|
@ -77,7 +76,7 @@ type JobController struct {
|
|||
recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
|
||||
func NewJobController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
// TODO: remove the wrapper when every clients have moved to use the clientset.
|
||||
|
@ -98,7 +97,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
|
|||
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
|
||||
}
|
||||
|
||||
jm.jobStore.Store, jm.jobController = framework.NewInformer(
|
||||
jm.jobStore.Store, jm.jobController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
|
||||
|
@ -110,7 +109,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
|
|||
&batch.Job{},
|
||||
// TODO: Can we have much longer period here?
|
||||
replicationcontroller.FullControllerResyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: jm.enqueueController,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
if job := cur.(*batch.Job); !IsJobFinished(job) {
|
||||
|
@ -121,7 +120,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
|
|||
},
|
||||
)
|
||||
|
||||
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: jm.addPod,
|
||||
UpdateFunc: jm.updatePod,
|
||||
DeleteFunc: jm.deletePod,
|
||||
|
|
|
@ -25,7 +25,6 @@ import (
|
|||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/typed/dynamic"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
|
@ -45,7 +44,7 @@ type NamespaceController struct {
|
|||
// store that holds the namespaces
|
||||
store cache.Store
|
||||
// controller that observes the namespaces
|
||||
controller *framework.Controller
|
||||
controller *cache.Controller
|
||||
// namespaces that have been queued up for processing by workers
|
||||
queue workqueue.RateLimitingInterface
|
||||
// list of preferred group versions and their corresponding resource set for namespace deletion
|
||||
|
@ -95,7 +94,7 @@ func NewNamespaceController(
|
|||
}
|
||||
|
||||
// configure the backing store/controller
|
||||
store, controller := framework.NewInformer(
|
||||
store, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().Namespaces().List(options)
|
||||
|
@ -106,7 +105,7 @@ func NewNamespaceController(
|
|||
},
|
||||
&api.Namespace{},
|
||||
resyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
namespace := obj.(*api.Namespace)
|
||||
namespaceController.enqueueNamespace(namespace)
|
||||
|
|
|
@ -33,8 +33,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/framework/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
|
@ -136,13 +135,13 @@ type NodeController struct {
|
|||
maximumGracePeriod time.Duration
|
||||
recorder record.EventRecorder
|
||||
// Pod framework and store
|
||||
podController framework.ControllerInterface
|
||||
podController cache.ControllerInterface
|
||||
podStore cache.StoreToPodLister
|
||||
// Node framework and store
|
||||
nodeController *framework.Controller
|
||||
nodeController *cache.Controller
|
||||
nodeStore cache.StoreToNodeLister
|
||||
// DaemonSet framework and store
|
||||
daemonSetController *framework.Controller
|
||||
daemonSetController *cache.Controller
|
||||
daemonSetStore cache.StoreToDaemonSetLister
|
||||
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
|
||||
cidrAllocator CIDRAllocator
|
||||
|
@ -164,7 +163,7 @@ type NodeController struct {
|
|||
// we have a personal informer, we must start it ourselves. If you start
|
||||
// the controller using NewDaemonSetsController(passing SharedInformer), this
|
||||
// will be null
|
||||
internalPodInformer framework.SharedIndexInformer
|
||||
internalPodInformer cache.SharedIndexInformer
|
||||
}
|
||||
|
||||
// NewNodeController returns a new node controller to sync instances from cloudprovider.
|
||||
|
@ -172,7 +171,7 @@ type NodeController struct {
|
|||
// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
|
||||
// currently, this should be handled as a fatal error.
|
||||
func NewNodeController(
|
||||
podInformer framework.SharedIndexInformer,
|
||||
podInformer cache.SharedIndexInformer,
|
||||
cloud cloudprovider.Interface,
|
||||
kubeClient clientset.Interface,
|
||||
podEvictionTimeout time.Duration,
|
||||
|
@ -241,16 +240,16 @@ func NewNodeController(
|
|||
nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
|
||||
nc.computeZoneStateFunc = nc.ComputeZoneState
|
||||
|
||||
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: nc.maybeDeleteTerminatingPod,
|
||||
UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
|
||||
})
|
||||
nc.podStore.Indexer = podInformer.GetIndexer()
|
||||
nc.podController = podInformer.GetController()
|
||||
|
||||
nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{}
|
||||
nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
|
||||
if nc.allocateNodeCIDRs {
|
||||
nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{
|
||||
nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
node := obj.(*api.Node)
|
||||
err := nc.cidrAllocator.AllocateOrOccupyCIDR(node)
|
||||
|
@ -296,7 +295,7 @@ func NewNodeController(
|
|||
}
|
||||
}
|
||||
|
||||
nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
|
||||
nc.nodeStore.Store, nc.nodeController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return nc.kubeClient.Core().Nodes().List(options)
|
||||
|
@ -310,7 +309,7 @@ func NewNodeController(
|
|||
nodeEventHandlerFuncs,
|
||||
)
|
||||
|
||||
nc.daemonSetStore.Store, nc.daemonSetController = framework.NewInformer(
|
||||
nc.daemonSetStore.Store, nc.daemonSetController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
|
||||
|
@ -321,7 +320,7 @@ func NewNodeController(
|
|||
},
|
||||
&extensions.DaemonSet{},
|
||||
controller.NoResyncPeriodFunc(),
|
||||
framework.ResourceEventHandlerFuncs{},
|
||||
cache.ResourceEventHandlerFuncs{},
|
||||
)
|
||||
|
||||
if allocateNodeCIDRs {
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/record"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/errors"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
|
@ -63,12 +62,12 @@ type PetSetController struct {
|
|||
// podStoreSynced returns true if the pod store has synced at least once.
|
||||
podStoreSynced func() bool
|
||||
// Watches changes to all pods.
|
||||
podController framework.ControllerInterface
|
||||
podController cache.ControllerInterface
|
||||
|
||||
// A store of PetSets, populated by the psController.
|
||||
psStore cache.StoreToPetSetLister
|
||||
// Watches changes to all PetSets.
|
||||
psController *framework.Controller
|
||||
psController *cache.Controller
|
||||
|
||||
// A store of the 1 unhealthy pet blocking progress for a given ps
|
||||
blockingPetStore *unhealthyPetTracker
|
||||
|
@ -82,7 +81,7 @@ type PetSetController struct {
|
|||
}
|
||||
|
||||
// NewPetSetController creates a new petset controller.
|
||||
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
|
||||
func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
|
||||
|
@ -98,7 +97,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
|
|||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
|
||||
}
|
||||
|
||||
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
// lookup the petset and enqueue
|
||||
AddFunc: psc.addPod,
|
||||
// lookup current and old petset if labels changed
|
||||
|
@ -109,7 +108,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
|
|||
psc.podStore.Indexer = podInformer.GetIndexer()
|
||||
psc.podController = podInformer.GetController()
|
||||
|
||||
psc.psStore.Store, psc.psController = framework.NewInformer(
|
||||
psc.psStore.Store, psc.psController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
|
||||
|
@ -120,7 +119,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
|
|||
},
|
||||
&apps.PetSet{},
|
||||
petSetResyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: psc.enqueuePetSet,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
oldPS := old.(*apps.PetSet)
|
||||
|
|
|
@ -33,7 +33,6 @@ import (
|
|||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
|
||||
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
|
@ -61,14 +60,14 @@ type HorizontalController struct {
|
|||
// A store of HPA objects, populated by the controller.
|
||||
store cache.Store
|
||||
// Watches changes to all HPA objects.
|
||||
controller *framework.Controller
|
||||
controller *cache.Controller
|
||||
}
|
||||
|
||||
var downscaleForbiddenWindow = 5 * time.Minute
|
||||
var upscaleForbiddenWindow = 3 * time.Minute
|
||||
|
||||
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *framework.Controller) {
|
||||
return framework.NewInformer(
|
||||
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) {
|
||||
return cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
|
||||
|
@ -79,7 +78,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
|
|||
},
|
||||
&autoscaling.HorizontalPodAutoscaler{},
|
||||
resyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
hpa := obj.(*autoscaling.HorizontalPodAutoscaler)
|
||||
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil
|
||||
|
|
6
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go
generated
vendored
|
@ -123,8 +123,8 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
|
|||
requestSum := int64(0)
|
||||
missing := false
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase == api.PodPending {
|
||||
// Skip pending pods.
|
||||
if pod.Status.Phase != api.PodRunning {
|
||||
// Count only running pods.
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -144,7 +144,7 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
|
|||
return 0, 0, time.Time{}, fmt.Errorf("some pods do not have request for cpu")
|
||||
}
|
||||
glog.V(4).Infof("%s %s - sum of CPU requested: %d", namespace, selector, requestSum)
|
||||
requestAvg := requestSum / int64(len(podList.Items))
|
||||
requestAvg := requestSum / int64(len(podNames))
|
||||
// Consumption is already averaged and in millis.
|
||||
consumption, timestamp, err := h.getCpuUtilizationForPods(namespace, selector, podNames)
|
||||
if err != nil {
|
||||
|
|
|
@ -25,7 +25,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
|
@ -44,7 +43,7 @@ const (
|
|||
type PodGCController struct {
|
||||
kubeClient clientset.Interface
|
||||
podStore cache.StoreToPodLister
|
||||
podStoreSyncer *framework.Controller
|
||||
podStoreSyncer *cache.Controller
|
||||
deletePod func(namespace, name string) error
|
||||
threshold int
|
||||
}
|
||||
|
@ -63,7 +62,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
|
|||
|
||||
terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))
|
||||
|
||||
gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer(
|
||||
gcc.podStore.Indexer, gcc.podStoreSyncer = cache.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = terminatedSelector
|
||||
|
@ -76,7 +75,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
|
|||
},
|
||||
&api.Pod{},
|
||||
resyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{},
|
||||
cache.ResourceEventHandlerFuncs{},
|
||||
// We don't need to build a index for podStore here actually, but build one for consistency.
|
||||
// It will ensure that if people start making use of the podStore in more specific ways,
|
||||
// they'll get the benefits they expect. It will also reserve the name for future refactorings.
|
||||
|
|
|
@ -36,8 +36,7 @@ import (
|
|||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/framework/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilerrors "k8s.io/kubernetes/pkg/util/errors"
|
||||
|
@ -81,7 +80,7 @@ type ReplicaSetController struct {
|
|||
// we have a personal informer, we must start it ourselves. If you start
|
||||
// the controller using NewReplicationManager(passing SharedInformer), this
|
||||
// will be null
|
||||
internalPodInformer framework.SharedIndexInformer
|
||||
internalPodInformer cache.SharedIndexInformer
|
||||
|
||||
// A ReplicaSet is temporarily suspended after creating/deleting these many replicas.
|
||||
// It resumes normal action after observing the watch events for them.
|
||||
|
@ -95,11 +94,11 @@ type ReplicaSetController struct {
|
|||
// A store of ReplicaSets, populated by the rsController
|
||||
rsStore cache.StoreToReplicaSetLister
|
||||
// Watches changes to all ReplicaSets
|
||||
rsController *framework.Controller
|
||||
rsController *cache.Controller
|
||||
// A store of pods, populated by the podController
|
||||
podStore cache.StoreToPodLister
|
||||
// Watches changes to all pods
|
||||
podController framework.ControllerInterface
|
||||
podController cache.ControllerInterface
|
||||
// podStoreSynced returns true if the pod store has been synced at least once.
|
||||
// Added as a member to the struct to allow injection for testing.
|
||||
podStoreSynced func() bool
|
||||
|
@ -115,7 +114,7 @@ type ReplicaSetController struct {
|
|||
}
|
||||
|
||||
// NewReplicaSetController creates a new ReplicaSetController.
|
||||
func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
|
||||
func NewReplicaSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
|
@ -126,7 +125,7 @@ func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClie
|
|||
}
|
||||
|
||||
// newReplicaSetController configures a replica set controller with the specified event recorder
|
||||
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
|
||||
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
|
||||
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
|
||||
}
|
||||
|
@ -143,7 +142,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
|
|||
garbageCollectorEnabled: garbageCollectorEnabled,
|
||||
}
|
||||
|
||||
rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
|
||||
rsc.rsStore.Store, rsc.rsController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
|
||||
|
@ -155,7 +154,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
|
|||
&extensions.ReplicaSet{},
|
||||
// TODO: Can we have much longer period here?
|
||||
FullControllerResyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: rsc.enqueueReplicaSet,
|
||||
UpdateFunc: rsc.updateRS,
|
||||
// This will enter the sync loop and no-op, because the replica set has been deleted from the store.
|
||||
|
@ -165,7 +164,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
|
|||
},
|
||||
)
|
||||
|
||||
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: rsc.addPod,
|
||||
// This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like
|
||||
// overkill the most frequent pod update is status, and the associated ReplicaSet will only list from
|
||||
|
|
|
@ -34,8 +34,7 @@ import (
|
|||
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/framework/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
|
@ -86,7 +85,7 @@ type ReplicationManager struct {
|
|||
// we have a personal informer, we must start it ourselves. If you start
|
||||
// the controller using NewReplicationManager(passing SharedInformer), this
|
||||
// will be null
|
||||
internalPodInformer framework.SharedIndexInformer
|
||||
internalPodInformer cache.SharedIndexInformer
|
||||
|
||||
// An rc is temporarily suspended after creating/deleting these many replicas.
|
||||
// It resumes normal action after observing the watch events for them.
|
||||
|
@ -100,11 +99,11 @@ type ReplicationManager struct {
|
|||
// A store of replication controllers, populated by the rcController
|
||||
rcStore cache.StoreToReplicationControllerLister
|
||||
// Watches changes to all replication controllers
|
||||
rcController *framework.Controller
|
||||
rcController *cache.Controller
|
||||
// A store of pods, populated by the podController
|
||||
podStore cache.StoreToPodLister
|
||||
// Watches changes to all pods
|
||||
podController framework.ControllerInterface
|
||||
podController cache.ControllerInterface
|
||||
// podStoreSynced returns true if the pod store has been synced at least once.
|
||||
// Added as a member to the struct to allow injection for testing.
|
||||
podStoreSynced func() bool
|
||||
|
@ -120,7 +119,7 @@ type ReplicationManager struct {
|
|||
}
|
||||
|
||||
// NewReplicationManager creates a replication manager
|
||||
func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
|
||||
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
|
@ -130,7 +129,7 @@ func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient
|
|||
}
|
||||
|
||||
// newReplicationManager configures a replication manager with the specified event recorder
|
||||
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
|
||||
func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
|
||||
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
|
||||
}
|
||||
|
@ -147,7 +146,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
|
|||
garbageCollectorEnabled: garbageCollectorEnabled,
|
||||
}
|
||||
|
||||
rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
|
||||
rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
|
||||
|
@ -159,7 +158,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
|
|||
&api.ReplicationController{},
|
||||
// TODO: Can we have much longer period here?
|
||||
FullControllerResyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: rm.enqueueController,
|
||||
UpdateFunc: rm.updateRC,
|
||||
// This will enter the sync loop and no-op, because the controller has been deleted from the store.
|
||||
|
@ -170,7 +169,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
|
|||
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
|
||||
)
|
||||
|
||||
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: rm.addPod,
|
||||
// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
|
||||
// the most frequent pod update is status, and the associated rc will only list from local storage, so
|
||||
|
|
37
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go
generated
vendored
37
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go
generated
vendored
|
@ -27,8 +27,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/framework/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/quota/evaluator/core"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
|
@ -90,18 +89,18 @@ func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func
|
|||
type ReplenishmentControllerFactory interface {
|
||||
// NewController returns a controller configured with the specified options.
|
||||
// This method is NOT thread-safe.
|
||||
NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error)
|
||||
NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error)
|
||||
}
|
||||
|
||||
// replenishmentControllerFactory implements ReplenishmentControllerFactory
|
||||
type replenishmentControllerFactory struct {
|
||||
kubeClient clientset.Interface
|
||||
podInformer framework.SharedInformer
|
||||
podInformer cache.SharedInformer
|
||||
}
|
||||
|
||||
// NewReplenishmentControllerFactory returns a factory that knows how to build controllers
|
||||
// to replenish resources when updated or deleted
|
||||
func NewReplenishmentControllerFactory(podInformer framework.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
|
||||
func NewReplenishmentControllerFactory(podInformer cache.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
|
||||
return &replenishmentControllerFactory{
|
||||
kubeClient: kubeClient,
|
||||
podInformer: podInformer,
|
||||
|
@ -112,8 +111,8 @@ func NewReplenishmentControllerFactoryFromClient(kubeClient clientset.Interface)
|
|||
return NewReplenishmentControllerFactory(nil, kubeClient)
|
||||
}
|
||||
|
||||
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) {
|
||||
var result framework.ControllerInterface
|
||||
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
|
||||
var result cache.ControllerInterface
|
||||
if r.kubeClient != nil && r.kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().GetRESTClient().GetRateLimiter())
|
||||
}
|
||||
|
@ -121,7 +120,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
switch options.GroupKind {
|
||||
case api.Kind("Pod"):
|
||||
if r.podInformer != nil {
|
||||
r.podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
r.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: PodReplenishmentUpdateFunc(options),
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
})
|
||||
|
@ -133,7 +132,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
result = r.podInformer
|
||||
|
||||
case api.Kind("Service"):
|
||||
_, result = framework.NewInformer(
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Services(api.NamespaceAll).List(options)
|
||||
|
@ -144,13 +143,13 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
},
|
||||
&api.Service{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: ServiceReplenishmentUpdateFunc(options),
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("ReplicationController"):
|
||||
_, result = framework.NewInformer(
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
|
||||
|
@ -161,12 +160,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
},
|
||||
&api.ReplicationController{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("PersistentVolumeClaim"):
|
||||
_, result = framework.NewInformer(
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
|
||||
|
@ -177,12 +176,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
},
|
||||
&api.PersistentVolumeClaim{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("Secret"):
|
||||
_, result = framework.NewInformer(
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options)
|
||||
|
@ -193,12 +192,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
},
|
||||
&api.Secret{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
case api.Kind("ConfigMap"):
|
||||
_, result = framework.NewInformer(
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options)
|
||||
|
@ -209,7 +208,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
|||
},
|
||||
&api.ConfigMap{},
|
||||
options.ResyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
},
|
||||
)
|
||||
|
@ -254,7 +253,7 @@ func IsUnhandledGroupKindError(err error) bool {
|
|||
// returning the first success or failure it hits. If there are no hits either way, it return an UnhandledGroupKind error
|
||||
type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory
|
||||
|
||||
func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) {
|
||||
func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
|
||||
for _, factory := range f {
|
||||
controller, err := factory.NewController(options)
|
||||
if !IsUnhandledGroupKindError(err) {
|
||||
|
|
11
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go
generated
vendored
|
@ -26,7 +26,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/quota"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
|
@ -60,7 +59,7 @@ type ResourceQuotaController struct {
|
|||
// An index of resource quota objects by namespace
|
||||
rqIndexer cache.Indexer
|
||||
// Watches changes to all resource quota
|
||||
rqController *framework.Controller
|
||||
rqController *cache.Controller
|
||||
// ResourceQuota objects that need to be synchronized
|
||||
queue workqueue.RateLimitingInterface
|
||||
// missingUsageQueue holds objects that are missing the initial usage informatino
|
||||
|
@ -72,7 +71,7 @@ type ResourceQuotaController struct {
|
|||
// knows how to calculate usage
|
||||
registry quota.Registry
|
||||
// controllers monitoring to notify for replenishment
|
||||
replenishmentControllers []framework.ControllerInterface
|
||||
replenishmentControllers []cache.ControllerInterface
|
||||
}
|
||||
|
||||
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
|
||||
|
@ -83,7 +82,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
|
|||
missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
|
||||
resyncPeriod: options.ResyncPeriod,
|
||||
registry: options.Registry,
|
||||
replenishmentControllers: []framework.ControllerInterface{},
|
||||
replenishmentControllers: []cache.ControllerInterface{},
|
||||
}
|
||||
if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter())
|
||||
|
@ -92,7 +91,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
|
|||
rq.syncHandler = rq.syncResourceQuotaFromKey
|
||||
|
||||
// build the controller that observes quota
|
||||
rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
|
||||
rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
|
||||
|
@ -103,7 +102,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
|
|||
},
|
||||
&api.ResourceQuota{},
|
||||
rq.resyncPeriod(),
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: rq.addQuota,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
// We are only interested in observing updates to quota.spec to drive updates to quota.status.
|
||||
|
|
|
@ -80,7 +80,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("error listing routes: %v", err)
|
||||
}
|
||||
// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
|
||||
// TODO (cjcullen): use pkg/controller/cache.NewInformer to watch this
|
||||
// and reduce the number of lists needed.
|
||||
nodeList, err := rc.kubeClient.Core().Nodes().List(api.ListOptions{})
|
||||
if err != nil {
|
||||
|
|
|
@ -33,7 +33,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
|
@ -88,7 +87,7 @@ type ServiceController struct {
|
|||
// A store of services, populated by the serviceController
|
||||
serviceStore cache.StoreToServiceLister
|
||||
// Watches changes to all services
|
||||
serviceController *framework.Controller
|
||||
serviceController *cache.Controller
|
||||
eventBroadcaster record.EventBroadcaster
|
||||
eventRecorder record.EventRecorder
|
||||
nodeLister cache.StoreToNodeLister
|
||||
|
@ -120,7 +119,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
|
|||
},
|
||||
workingQueue: workqueue.NewDelayingQueue(),
|
||||
}
|
||||
s.serviceStore.Store, s.serviceController = framework.NewInformer(
|
||||
s.serviceStore.Store, s.serviceController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
|
||||
return s.kubeClient.Core().Services(api.NamespaceAll).List(options)
|
||||
|
@ -131,7 +130,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
|
|||
},
|
||||
&api.Service{},
|
||||
serviceSyncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: s.enqueueService,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
oldSvc, ok1 := old.(*api.Service)
|
||||
|
|
13
vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go
generated
vendored
|
@ -26,7 +26,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
|
@ -80,7 +79,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
|
|||
// If we're maintaining a single account, we can scope the accounts we watch to just that name
|
||||
accountSelector = fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ServiceAccounts[0].Name})
|
||||
}
|
||||
e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
|
||||
e.serviceAccounts, e.serviceAccountController = cache.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = accountSelector
|
||||
|
@ -93,13 +92,13 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
|
|||
},
|
||||
&api.ServiceAccount{},
|
||||
options.ServiceAccountResync,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: e.serviceAccountDeleted,
|
||||
},
|
||||
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
|
||||
)
|
||||
|
||||
e.namespaces, e.namespaceController = framework.NewIndexerInformer(
|
||||
e.namespaces, e.namespaceController = cache.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return e.client.Core().Namespaces().List(options)
|
||||
|
@ -110,7 +109,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
|
|||
},
|
||||
&api.Namespace{},
|
||||
options.NamespaceResync,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: e.namespaceAdded,
|
||||
UpdateFunc: e.namespaceUpdated,
|
||||
},
|
||||
|
@ -131,8 +130,8 @@ type ServiceAccountsController struct {
|
|||
namespaces cache.Indexer
|
||||
|
||||
// Since we join two objects, we'll watch both of them with controllers.
|
||||
serviceAccountController *framework.Controller
|
||||
namespaceController *framework.Controller
|
||||
serviceAccountController *cache.Controller
|
||||
namespaceController *cache.Controller
|
||||
}
|
||||
|
||||
// Runs controller loops and returns immediately
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/registry/secret"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
|
@ -90,7 +89,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
|
|||
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
|
||||
}
|
||||
|
||||
e.serviceAccounts, e.serviceAccountController = framework.NewInformer(
|
||||
e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
|
||||
|
@ -101,7 +100,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
|
|||
},
|
||||
&api.ServiceAccount{},
|
||||
options.ServiceAccountResync,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: e.queueServiceAccountSync,
|
||||
UpdateFunc: e.queueServiceAccountUpdateSync,
|
||||
DeleteFunc: e.queueServiceAccountSync,
|
||||
|
@ -109,7 +108,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
|
|||
)
|
||||
|
||||
tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
|
||||
e.secrets, e.secretController = framework.NewIndexerInformer(
|
||||
e.secrets, e.secretController = cache.NewIndexerInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = tokenSelector
|
||||
|
@ -122,7 +121,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
|
|||
},
|
||||
&api.Secret{},
|
||||
options.SecretResync,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: e.queueSecretSync,
|
||||
UpdateFunc: e.queueSecretUpdateSync,
|
||||
DeleteFunc: e.queueSecretSync,
|
||||
|
@ -144,8 +143,8 @@ type TokensController struct {
|
|||
secrets cache.Indexer
|
||||
|
||||
// Since we join two objects, we'll watch both of them with controllers.
|
||||
serviceAccountController *framework.Controller
|
||||
secretController *framework.Controller
|
||||
serviceAccountController *cache.Controller
|
||||
secretController *cache.Controller
|
||||
|
||||
// syncServiceAccountQueue handles service account events:
|
||||
// * ensures a referenced token exists for service accounts which still exist
|
||||
|
|
18
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go
generated
vendored
18
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go
generated
vendored
|
@ -25,10 +25,10 @@ import (
|
|||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
kcache "k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
|
||||
|
@ -66,10 +66,10 @@ type AttachDetachController interface {
|
|||
// NewAttachDetachController returns a new instance of AttachDetachController.
|
||||
func NewAttachDetachController(
|
||||
kubeClient internalclientset.Interface,
|
||||
podInformer framework.SharedInformer,
|
||||
nodeInformer framework.SharedInformer,
|
||||
pvcInformer framework.SharedInformer,
|
||||
pvInformer framework.SharedInformer,
|
||||
podInformer kcache.SharedInformer,
|
||||
nodeInformer kcache.SharedInformer,
|
||||
pvcInformer kcache.SharedInformer,
|
||||
pvInformer kcache.SharedInformer,
|
||||
cloud cloudprovider.Interface,
|
||||
plugins []volume.VolumePlugin,
|
||||
recorder record.EventRecorder) (AttachDetachController, error) {
|
||||
|
@ -94,13 +94,13 @@ func NewAttachDetachController(
|
|||
cloud: cloud,
|
||||
}
|
||||
|
||||
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
|
||||
AddFunc: adc.podAdd,
|
||||
UpdateFunc: adc.podUpdate,
|
||||
DeleteFunc: adc.podDelete,
|
||||
})
|
||||
|
||||
nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
|
||||
nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
|
||||
AddFunc: adc.nodeAdd,
|
||||
UpdateFunc: adc.nodeUpdate,
|
||||
DeleteFunc: adc.nodeDelete,
|
||||
|
@ -143,12 +143,12 @@ type attachDetachController struct {
|
|||
// pvcInformer is the shared PVC informer used to fetch and store PVC
|
||||
// objects from the API server. It is shared with other controllers and
|
||||
// therefore the PVC objects in its store should be treated as immutable.
|
||||
pvcInformer framework.SharedInformer
|
||||
pvcInformer kcache.SharedInformer
|
||||
|
||||
// pvInformer is the shared PV informer used to fetch and store PV objects
|
||||
// from the API server. It is shared with other controllers and therefore
|
||||
// the PV objects in its store should be treated as immutable.
|
||||
pvInformer framework.SharedInformer
|
||||
pvInformer kcache.SharedInformer
|
||||
|
||||
// cloud provider used by volume host
|
||||
cloud cloudprovider.Interface
|
||||
|
|
|
@ -66,12 +66,12 @@ type ActualStateOfWorld interface {
|
|||
// the specified volume, an error is returned.
|
||||
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName string, mounted bool) error
|
||||
|
||||
// ResetNodeStatusUpdateNeeded resets statusUpdateNeeded for the specified
|
||||
// node to false indicating the AttachedVolume field of the Node's Status
|
||||
// object has been updated.
|
||||
// If no node with the name nodeName exists in list of attached nodes for
|
||||
// the specified volume, an error is returned.
|
||||
ResetNodeStatusUpdateNeeded(nodeName string) error
|
||||
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
|
||||
// node to true indicating the AttachedVolume field in the Node's Status
|
||||
// object needs to be updated by the node updater again.
|
||||
// If the specifed node does not exist in the nodesToUpdateStatusFor list,
|
||||
// log the error and return
|
||||
SetNodeStatusUpdateNeeded(nodeName string)
|
||||
|
||||
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
|
||||
// request any more for the volume
|
||||
|
@ -278,8 +278,17 @@ func (asw *actualStateOfWorld) AddVolumeNode(
|
|||
nodesAttachedTo: make(map[string]nodeAttachedTo),
|
||||
devicePath: devicePath,
|
||||
}
|
||||
asw.attachedVolumes[volumeName] = volumeObj
|
||||
} else {
|
||||
// If volume object already exists, it indicates that the information would be out of date.
|
||||
// Update the fields for volume object except the nodes attached to the volumes.
|
||||
volumeObj.devicePath = devicePath
|
||||
volumeObj.spec = volumeSpec
|
||||
glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
|
||||
volumeName,
|
||||
nodeName,
|
||||
devicePath)
|
||||
}
|
||||
asw.attachedVolumes[volumeName] = volumeObj
|
||||
|
||||
_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
|
||||
if !nodeExists {
|
||||
|
@ -322,7 +331,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
|
|||
|
||||
nodeObj.mountedByNode = mounted
|
||||
volumeObj.nodesAttachedTo[nodeName] = nodeObj
|
||||
glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %q",
|
||||
glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
|
||||
volumeName,
|
||||
nodeName,
|
||||
mounted)
|
||||
|
@ -433,21 +442,28 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
|
|||
}
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) ResetNodeStatusUpdateNeeded(
|
||||
nodeName string) error {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
// Remove volume from volumes to report as attached
|
||||
// Update the flag statusUpdateNeeded to indicate whether node status is already updated or
|
||||
// needs to be updated again by the node status updater.
|
||||
// If the specifed node does not exist in the nodesToUpdateStatusFor list, log the error and return
|
||||
// This is an internal function and caller should acquire and release the lock
|
||||
func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, needed bool) {
|
||||
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
|
||||
if !nodeToUpdateExists {
|
||||
return fmt.Errorf(
|
||||
"failed to ResetNodeStatusUpdateNeeded(nodeName=%q) nodeName does not exist",
|
||||
// should not happen
|
||||
glog.Errorf(
|
||||
"Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist",
|
||||
needed,
|
||||
nodeName)
|
||||
}
|
||||
|
||||
nodeToUpdate.statusUpdateNeeded = false
|
||||
nodeToUpdate.statusUpdateNeeded = needed
|
||||
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
|
||||
return nil
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName string) {
|
||||
asw.Lock()
|
||||
defer asw.Unlock()
|
||||
asw.updateNodeStatusUpdateNeeded(nodeName, true)
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) DeleteVolumeNode(
|
||||
|
@ -529,7 +545,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.Att
|
|||
defer asw.RUnlock()
|
||||
|
||||
volumesToReportAttached := make(map[string][]api.AttachedVolume)
|
||||
for _, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
|
||||
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
|
||||
if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make(
[]api.AttachedVolume,
@@ -544,6 +560,10 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.Att
}
volumesToReportAttached[nodeToUpdateObj.nodeName] = attachedVolumes
}
// When GetVolumesToReportAttached is called by node status updater, the current status
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated.
asw.updateNodeStatusUpdateNeeded(nodeName, false)
}

return volumesToReportAttached

@@ -557,6 +577,7 @@ func getAttachedVolume(
VolumeName: attachedVolume.volumeName,
VolumeSpec: attachedVolume.spec,
NodeName: nodeAttachedTo.nodeName,
DevicePath: attachedVolume.devicePath,
PluginIsAttachable: true,
},
MountedByNode: nodeAttachedTo.mountedByNode,

@@ -25,7 +25,6 @@ import (

"k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"

@@ -48,7 +47,7 @@ type DesiredStateOfWorldPopulator interface {
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
loopSleepDuration time.Duration,
podInformer framework.SharedInformer,
podInformer kcache.SharedInformer,
desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
loopSleepDuration: loopSleepDuration,

@@ -59,7 +58,7 @@ func NewDesiredStateOfWorldPopulator(

type desiredStateOfWorldPopulator struct {
loopSleepDuration time.Duration
podInformer framework.SharedInformer
podInformer kcache.SharedInformer
desiredStateOfWorld cache.DesiredStateOfWorld
}

@@ -25,8 +25,8 @@ import (
"github.com/golang/glog"

"k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/strategicpatch"
)

@@ -42,7 +42,7 @@ type NodeStatusUpdater interface {
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater(
kubeClient internalclientset.Interface,
nodeInformer framework.SharedInformer,
nodeInformer kcache.SharedInformer,
actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
return &nodeStatusUpdater{
actualStateOfWorld: actualStateOfWorld,

@@ -53,7 +53,7 @@ func NewNodeStatusUpdater(

type nodeStatusUpdater struct {
kubeClient internalclientset.Interface
nodeInformer framework.SharedInformer
nodeInformer kcache.SharedInformer
actualStateOfWorld cache.ActualStateOfWorld
}

@@ -107,20 +107,15 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {

_, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes)
if err != nil {
// If update node status fails, reset flag statusUpdateNeeded back to true
// to indicate this node status needs to be udpated again
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
return fmt.Errorf(
"failed to kubeClient.Core().Nodes().Patch for node %q. %v",
nodeName,
err)
}

err = nsu.actualStateOfWorld.ResetNodeStatusUpdateNeeded(nodeName)
if err != nil {
return fmt.Errorf(
"failed to ResetNodeStatusUpdateNeeded for node %q. %v",
nodeName,
err)
}

glog.V(3).Infof(
"Updating status for node %q succeeded. patchBytes: %q",
nodeName,
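The two hunks above pair GetVolumesToReportAttached, which clears statusUpdateNeeded once a node's attached volumes are handed to the status updater, with UpdateNodeStatuses, which re-arms the flag via SetNodeStatusUpdateNeeded when the PATCH fails. A minimal, self-contained sketch of that retry flag, using hypothetical types rather than the vendored ones:

```go
package main

import (
	"fmt"
	"sync"
)

// nodeStatusFlags tracks, per node, whether its volume status still needs to
// be pushed to the API server. It is a toy stand-in for the actualStateOfWorld
// bookkeeping shown in the diff above.
type nodeStatusFlags struct {
	mu     sync.Mutex
	needed map[string]bool
}

func newNodeStatusFlags() *nodeStatusFlags {
	return &nodeStatusFlags{needed: make(map[string]bool)}
}

// nodesToReport returns the nodes whose status should be reported and clears
// their flag, mirroring GetVolumesToReportAttached.
func (f *nodeStatusFlags) nodesToReport() []string {
	f.mu.Lock()
	defer f.mu.Unlock()
	var nodes []string
	for node, needed := range f.needed {
		if needed {
			nodes = append(nodes, node)
			f.needed[node] = false // status is about to be reported
		}
	}
	return nodes
}

// setNeeded re-arms the flag, mirroring SetNodeStatusUpdateNeeded after a
// failed PatchStatus call.
func (f *nodeStatusFlags) setNeeded(node string) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.needed[node] = true
}

// patchNodeStatus stands in for the real status PATCH and always fails here.
func patchNodeStatus(node string) error {
	return fmt.Errorf("simulated patch failure for %s", node)
}

func main() {
	flags := newNodeStatusFlags()
	flags.setNeeded("node-1")

	for _, node := range flags.nodesToReport() {
		if err := patchNodeStatus(node); err != nil {
			// Patch failed: flag the node again so the next sync retries.
			flags.setNeeded(node)
		}
	}
	fmt.Println("pending:", flags.needed)
}
```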
@@ -28,7 +28,6 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume"

@@ -150,12 +149,12 @@ const createProvisionedPVInterval = 10 * time.Second

// PersistentVolumeController is a controller that synchronizes
// PersistentVolumeClaims and PersistentVolumes. It starts two
// framework.Controllers that watch PersistentVolume and PersistentVolumeClaim
// cache.Controllers that watch PersistentVolume and PersistentVolumeClaim
// changes.
type PersistentVolumeController struct {
volumeController *framework.Controller
volumeController *cache.Controller
volumeSource cache.ListerWatcher
claimController *framework.Controller
claimController *cache.Controller
claimSource cache.ListerWatcher
classReflector *cache.Reflector
classSource cache.ListerWatcher

@@ -191,7 +190,7 @@ type PersistentVolumeController struct {
}

// syncClaim is the main controller method to decide what to do with a claim.
// It's invoked by appropriate framework.Controller callbacks when a claim is
// It's invoked by appropriate cache.Controller callbacks when a claim is
// created, updated or periodically synced. We do not differentiate between
// these events.
// For easier readability, it was split into syncUnboundClaim and syncBoundClaim

@@ -381,7 +380,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu
}

// syncVolume is the main controller method to decide what to do with a volume.
// It's invoked by appropriate framework.Controller callbacks when a volume is
// It's invoked by appropriate cache.Controller callbacks when a volume is
// created, updated or periodically synced. We do not differentiate between
// these events.
func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error {

@@ -913,7 +912,6 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolum
// Update the status
_, err = ctrl.updateVolumePhase(newVol, api.VolumeAvailable, "")
return err

}

// reclaimVolume implements volume.Spec.PersistentVolumeReclaimPolicy and

@@ -996,7 +994,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
}

// Plugin found
recycler, err := plugin.NewRecycler(volume.Name, spec)
recorder := ctrl.newRecyclerEventRecorder(volume)
recycler, err := plugin.NewRecycler(volume.Name, spec, recorder)
if err != nil {
// Cannot create recycler
strerr := fmt.Sprintf("Failed to create recycler: %v", err)

@@ -1024,6 +1023,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
}

glog.V(2).Infof("volume %q recycled", volume.Name)
// Send an event
ctrl.eventRecorder.Event(volume, api.EventTypeNormal, "VolumeRecycled", "Volume recycled")
// Make the volume available again
if err = ctrl.unbindVolume(volume); err != nil {
// Oops, could not save the volume and therefore the controller will

@@ -1366,6 +1367,17 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string,
}
}

// newRecyclerEventRecorder returns a RecycleEventRecorder that sends all events
// to given volume.
func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *api.PersistentVolume) vol.RecycleEventRecorder {
return func(eventtype, message string) {
ctrl.eventRecorder.Eventf(volume, eventtype, "RecyclerPod", "Recycler pod: %s", message)
}
}

// findProvisionablePlugin finds a provisioner plugin for a given claim.
// It returns either the provisioning plugin or nil when an external
// provisioner is requested.
func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *storage.StorageClass, error) {
// TODO: remove this alpha behavior in 1.5
alpha := hasAnnotation(claim.ObjectMeta, annAlphaClass)
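newRecyclerEventRecorder wraps the controller's event recorder in a plain func(eventtype, message string) closure bound to one PV, so recyclers can emit events without knowing about the controller. A rough standalone sketch of the same closure pattern; the sink type and names below are illustrative, not the Kubernetes ones:

```go
package main

import "fmt"

// RecycleEventRecorder matches the callback shape introduced in the diff:
// plugins only see a function, not the full event recorder.
type RecycleEventRecorder func(eventtype, message string)

// eventSink is a hypothetical stand-in for the controller's eventRecorder.
type eventSink struct{}

func (eventSink) Eventf(object, eventtype, reason, format string, args ...interface{}) {
	fmt.Printf("[%s/%s] %s: %s\n", object, eventtype, reason, fmt.Sprintf(format, args...))
}

// newRecyclerEventRecorder binds a specific volume to the sink and returns the
// narrow callback, mirroring the controller method added above.
func newRecyclerEventRecorder(sink eventSink, volumeName string) RecycleEventRecorder {
	return func(eventtype, message string) {
		sink.Eventf(volumeName, eventtype, "RecyclerPod", "Recycler pod: %s", message)
	}
}

func main() {
	recorder := newRecyclerEventRecorder(eventSink{}, "pv-0001")
	// A recycler would call this while watching its pod.
	recorder("Normal", "Started container")
}
```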
25 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go generated vendored

@@ -30,7 +30,6 @@ import (
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/goroutinemap"

@@ -65,7 +64,7 @@ func NewPersistentVolumeController(

controller := &PersistentVolumeController{
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: kubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),

@@ -120,22 +119,22 @@ func NewPersistentVolumeController(
}
controller.classSource = classSource

_, controller.volumeController = framework.NewIndexerInformer(
_, controller.volumeController = cache.NewIndexerInformer(
volumeSource,
&api.PersistentVolume{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: controller.addVolume,
UpdateFunc: controller.updateVolume,
DeleteFunc: controller.deleteVolume,
},
cache.Indexers{"accessmodes": accessModesIndexFunc},
)
_, controller.claimController = framework.NewInformer(
_, controller.claimController = cache.NewInformer(
claimSource,
&api.PersistentVolumeClaim{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: controller.addClaim,
UpdateFunc: controller.updateClaim,
DeleteFunc: controller.deleteClaim,

@@ -144,7 +143,7 @@ func NewPersistentVolumeController(

// This is just a cache of StorageClass instances, no special actions are
// needed when a class is created/deleted/updated.
controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
controller.classes = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
controller.classReflector = cache.NewReflector(
classSource,
&storage.StorageClass{},

@@ -212,7 +211,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVo
return storeObjectUpdate(ctrl.claims, claim, "claim")
}

// addVolume is callback from framework.Controller watching PersistentVolume
// addVolume is callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
pv, ok := obj.(*api.PersistentVolume)

@@ -247,7 +246,7 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
}
}

// updateVolume is callback from framework.Controller watching PersistentVolume
// updateVolume is callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
newVolume, ok := newObj.(*api.PersistentVolume)

@@ -282,7 +281,7 @@ func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{})
}
}

// deleteVolume is callback from framework.Controller watching PersistentVolume
// deleteVolume is callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
_ = ctrl.volumes.store.Delete(obj)

@@ -330,7 +329,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
}
}

// addClaim is callback from framework.Controller watching PersistentVolumeClaim
// addClaim is callback from cache.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
// Store the new claim version in the cache and do not process it if this is

@@ -360,7 +359,7 @@ func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
}
}

// updateClaim is callback from framework.Controller watching PersistentVolumeClaim
// updateClaim is callback from cache.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
// Store the new claim version in the cache and do not process it if this is

@@ -390,7 +389,7 @@ func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{})
}
}

// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim
// deleteClaim is callback from cache.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
_ = ctrl.claims.Delete(obj)
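Throughout this upgrade the informer constructors move from the framework package to cache, but the wiring keeps the same shape: a list/watch source, an object type, a resync period, and add/update/delete callbacks. The sketch below imitates only that handler-funcs dispatch with stand-in types; it is not the client library API:

```go
package main

import "fmt"

// eventType and resourceEventHandlerFuncs imitate the shape of
// cache.ResourceEventHandlerFuncs used in the hunks above.
type eventType int

const (
	added eventType = iota
	updated
	deleted
)

type resourceEventHandlerFuncs struct {
	AddFunc    func(obj interface{})
	UpdateFunc func(oldObj, newObj interface{})
	DeleteFunc func(obj interface{})
}

// dispatch routes one observed change to the matching callback, the way an
// informer's processing loop does for every event it sees.
func (h resourceEventHandlerFuncs) dispatch(t eventType, oldObj, newObj interface{}) {
	switch t {
	case added:
		if h.AddFunc != nil {
			h.AddFunc(newObj)
		}
	case updated:
		if h.UpdateFunc != nil {
			h.UpdateFunc(oldObj, newObj)
		}
	case deleted:
		if h.DeleteFunc != nil {
			h.DeleteFunc(oldObj)
		}
	}
}

func main() {
	handlers := resourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Println("add", obj) },
		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update", oldObj, "->", newObj) },
		DeleteFunc: func(obj interface{}) { fmt.Println("delete", obj) },
	}
	handlers.dispatch(added, nil, "pv-1")
	handlers.dispatch(updated, "pv-1", "pv-1b")
	handlers.dispatch(deleted, "pv-1b", nil)
}
```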
@@ -502,6 +502,7 @@ func (s *GenericAPIServer) init(c *Config) {

attributeGetter := apiserver.NewRequestAttributeGetter(s.RequestContextMapper, s.NewRequestInfoResolver())
handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, s.authorizer)
handler = apiserver.WithImpersonation(handler, s.RequestContextMapper, s.authorizer)
if len(c.AuditLogPath) != 0 {
// audit handler must comes before the impersonationFilter to read the original user
writer := &lumberjack.Logger{

@@ -511,9 +512,7 @@ func (s *GenericAPIServer) init(c *Config) {
MaxSize: c.AuditLogMaxSize,
}
handler = audit.WithAudit(handler, attributeGetter, writer)
defer writer.Close()
}
handler = apiserver.WithImpersonation(handler, s.RequestContextMapper, s.authorizer)

// Install Authenticator
if c.Authenticator != nil {
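Reordering WithImpersonation ahead of the audit block matters because each With* call wraps the previous handler: the wrapper applied last is outermost and sees the request first, which is how the audit filter can log the original, pre-impersonation user. A small net/http illustration of that ordering (the handler names are made up):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// wrap returns a middleware that prints its name before delegating, so the
// print order shows which wrapper sees the request first.
func wrap(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter", name)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("core handler")
	})

	// Applied first => innermost, runs last.
	handler = wrap("impersonation", handler)
	// Applied last => outermost, runs first, like the audit filter that must
	// observe the original (pre-impersonation) user.
	handler = wrap("audit", handler)

	req := httptest.NewRequest("GET", "/api", nil)
	handler.ServeHTTP(httptest.NewRecorder(), req)
	// Prints: enter audit, enter impersonation, core handler.
}
```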
@@ -38,8 +38,6 @@ import (
)

const (
DefaultDeserializationCacheSize = 50000

// TODO: This can be tightened up. It still matches objects named watch or proxy.
defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)"
)

@@ -157,7 +155,9 @@ func NewServerRunOptions() *ServerRunOptions {
func (o *ServerRunOptions) WithEtcdOptions() *ServerRunOptions {
o.StorageConfig = storagebackend.Config{
Prefix: DefaultEtcdPathPrefix,
DeserializationCacheSize: DefaultDeserializationCacheSize,
// Default cache size to 0 - if unset, its size will be set based on target
// memory usage.
DeserializationCacheSize: 0,
}
return o
}

@@ -46,14 +46,17 @@ type Tunneler interface {
}

type SSHTunneler struct {
// Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
lastSync int64 // Seconds since Epoch
lastSSHKeySync int64 // Seconds since Epoch

SSHUser string
SSHKeyfile string
InstallSSHKey InstallSSHKey
HealthCheckURL *url.URL

tunnels *ssh.SSHTunnelList
lastSync int64 // Seconds since Epoch
lastSSHKeySync int64 // Seconds since Epoch
lastSyncMetric prometheus.GaugeFunc
clock clock.Clock
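This struct change, like the etcdWatcher one further down, moves the int64 fields that are touched through sync/atomic to the top of the struct: on 32-bit platforms atomic 64-bit operations need 8-byte alignment, and only the first word of an allocated struct is guaranteed to have it. A minimal sketch of the layout rule:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// tracker keeps its atomically-updated counters first so they are 64-bit
// aligned even on 32-bit architectures (see the sync/atomic note in the diff
// above). Field names here are illustrative.
type tracker struct {
	lastSync       int64 // accessed with sync/atomic; must stay at the top
	lastSSHKeySync int64 // accessed with sync/atomic; must stay at the top

	name string // ordinary fields can follow in any order
}

func main() {
	t := &tracker{name: "tunneler"}
	atomic.StoreInt64(&t.lastSync, 1234567890)
	atomic.AddInt64(&t.lastSSHKeySync, 1)
	fmt.Println(t.name, atomic.LoadInt64(&t.lastSync), atomic.LoadInt64(&t.lastSSHKeySync))
}
```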
@@ -158,9 +158,6 @@
// Period for performing image garbage collection.
ImageGCPeriod = 5 * time.Minute

// maxImagesInStatus is the number of max images we store in image status.
maxImagesInNodeStatus = 50

// Minimum number of dead containers to keep in a pod
minDeadContainerInPod = 1
)

@@ -39,6 +39,15 @@ import (
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
// maxImagesInNodeStatus is the number of max images we store in image status.
maxImagesInNodeStatus = 50

// maxNamesPerImageInNodeStatus is max number of names per image stored in
// the node status.
maxNamesPerImageInNodeStatus = 5
)

// registerWithApiServer registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).

@@ -501,8 +510,13 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
}

for _, image := range containerImages {
names := append(image.RepoDigests, image.RepoTags...)
// Report up to maxNamesPerImageInNodeStatus names per image.
if len(names) > maxNamesPerImageInNodeStatus {
names = names[0:maxNamesPerImageInNodeStatus]
}
imagesOnNode = append(imagesOnNode, api.ContainerImage{
Names: append(image.RepoTags, image.RepoDigests...),
Names: names,
SizeBytes: image.Size,
})
}
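The setNodeStatusImages change builds each image's name list from digests plus tags and caps it at maxNamesPerImageInNodeStatus (5 in the hunk above), so heavily retagged images cannot bloat the node object. A small standalone version of that truncation, with stand-in types:

```go
package main

import "fmt"

const maxNamesPerImage = 5 // mirrors maxNamesPerImageInNodeStatus in the diff

type containerImage struct {
	RepoDigests []string
	RepoTags    []string
	Size        int64
}

// namesForStatus lists digests first, appends tags, and truncates the
// combined list, following the setNodeStatusImages change above.
func namesForStatus(img containerImage) []string {
	names := append(img.RepoDigests, img.RepoTags...)
	if len(names) > maxNamesPerImage {
		names = names[0:maxNamesPerImage]
	}
	return names
}

func main() {
	img := containerImage{
		RepoDigests: []string{"repo@sha256:aaa", "repo@sha256:bbb"},
		RepoTags:    []string{"repo:1", "repo:2", "repo:3", "repo:4", "repo:5"},
		Size:        123456,
	}
	fmt.Println(namesForStatus(img)) // at most 5 entries are reported
}
```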
@@ -40,6 +40,12 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume
podVolumes := kl.volumeManager.GetMountedVolumesForPod(
volumetypes.UniquePodName(podUID))
for outerVolumeSpecName, volume := range podVolumes {
// TODO: volume.Mounter could be nil if volume object is recovered
// from reconciler's sync state process. PR 33616 will fix this problem
// to create Mounter object when recovering volume state.
if volume.Mounter == nil {
continue
}
volumesToReturn[outerVolumeSpecName] = volume.Mounter
}

9 vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go generated vendored

@@ -366,8 +366,15 @@ func (asw *actualStateOfWorld) addVolume(
globallyMounted: false,
devicePath: devicePath,
}
asw.attachedVolumes[volumeName] = volumeObj
} else {
// If volume object already exists, update the fields such as device path
volumeObj.devicePath = devicePath
volumeObj.spec = volumeSpec
glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q",
volumeName,
devicePath)
}
asw.attachedVolumes[volumeName] = volumeObj

return nil
}
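The write-back on the last line of the addVolume hunk (asw.attachedVolumes[volumeName] = volumeObj) suggests the map holds struct values: the entry is copied out, its devicePath and spec are updated on the copy, and the copy has to be stored back for the change to take effect. A tiny illustration with hypothetical types:

```go
package main

import "fmt"

type attachedVolume struct {
	devicePath string
	spec       string
}

func main() {
	volumes := map[string]attachedVolume{
		"vol-1": {devicePath: "/dev/xvdf", spec: "old-spec"},
	}

	// Mutating the copy alone does not change the map entry...
	v := volumes["vol-1"]
	v.devicePath = "/dev/xvdg"
	v.spec = "new-spec"

	// ...the updated value has to be stored back, as the diff does with
	// asw.attachedVolumes[volumeName] = volumeObj.
	volumes["vol-1"] = v

	fmt.Println(volumes["vol-1"]) // {/dev/xvdg new-spec}
}
```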
@@ -79,6 +79,12 @@ func exceptKey(except string) includeFunc {

// etcdWatcher converts a native etcd watch to a watch.Interface.
type etcdWatcher struct {
// HighWaterMarks for performance debugging.
// Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
incomingHWM HighWaterMark
outgoingHWM HighWaterMark

encoding runtime.Codec
// Note that versioner is required for etcdWatcher to work correctly.
// There is no public constructor of it, so be careful when manipulating

@@ -108,10 +114,6 @@ type etcdWatcher struct {
// Injectable for testing. Send the event down the outgoing channel.
emit func(watch.Event)

// HighWaterMarks for performance debugging.
incomingHWM HighWaterMark
outgoingHWM HighWaterMark

cache etcdCache
}

@@ -51,7 +51,7 @@ var (
// semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight
// translation to be semver compliant.
gitVersion string = "v1.4.0+$Format:%h$"
gitVersion string = "v1.4.1+$Format:%h$"
gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"
@@ -46,7 +46,7 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin
type hostPathPlugin struct {
host volume.VolumeHost
// decouple creating Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing.
newRecyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
newRecyclerFunc func(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
newDeleterFunc func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error)
newProvisionerFunc func(options volume.VolumeOptions, host volume.VolumeHost) (volume.Provisioner, error)
config volume.VolumeConfig

@@ -112,8 +112,8 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (vo
}}, nil
}

func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec) (volume.Recycler, error) {
return plugin.newRecyclerFunc(pvName, spec, plugin.host, plugin.config)
func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
return plugin.newRecyclerFunc(pvName, spec, eventRecorder, plugin.host, plugin.config)
}

func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {

@@ -142,18 +142,19 @@ func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string)
return volume.NewSpecFromVolume(hostPathVolume), nil
}

func newRecycler(pvName string, spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
}
path := spec.PersistentVolume.Spec.HostPath.Path
return &hostPathRecycler{
name: spec.Name(),
path: path,
host: host,
config: config,
timeout: volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
pvName: pvName,
name: spec.Name(),
path: path,
host: host,
config: config,
timeout: volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
pvName: pvName,
eventRecorder: eventRecorder,
}, nil
}

@@ -234,7 +235,8 @@ type hostPathRecycler struct {
config volume.VolumeConfig
timeout int64
volume.MetricsNil
pvName string
pvName string
eventRecorder volume.RecycleEventRecorder
}

func (r *hostPathRecycler) GetPath() string {

@@ -253,7 +255,7 @@ func (r *hostPathRecycler) Recycle() error {
Path: r.path,
},
}
return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient())
return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}

// hostPathProvisioner implements a Provisioner for the HostPath plugin
@@ -46,7 +46,7 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin
type nfsPlugin struct {
host volume.VolumeHost
// decouple creating recyclers by deferring to a function. Allows for easier testing.
newRecyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
newRecyclerFunc func(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
config volume.VolumeConfig
}

@@ -132,8 +132,8 @@ func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID,
}}, nil
}

func (plugin *nfsPlugin) NewRecycler(pvName string, spec *volume.Spec) (volume.Recycler, error) {
return plugin.newRecyclerFunc(pvName, spec, plugin.host, plugin.config)
func (plugin *nfsPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
return plugin.newRecyclerFunc(pvName, spec, eventRecorder, plugin.host, plugin.config)
}

func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {

@@ -274,18 +274,19 @@ func (c *nfsUnmounter) TearDownAt(dir string) error {
return nil
}

func newRecycler(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) {
func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.NFS is nil")
}
return &nfsRecycler{
name: spec.Name(),
server: spec.PersistentVolume.Spec.NFS.Server,
path: spec.PersistentVolume.Spec.NFS.Path,
host: host,
config: volumeConfig,
timeout: volume.CalculateTimeoutForVolume(volumeConfig.RecyclerMinimumTimeout, volumeConfig.RecyclerTimeoutIncrement, spec.PersistentVolume),
pvName: pvName,
name: spec.Name(),
server: spec.PersistentVolume.Spec.NFS.Server,
path: spec.PersistentVolume.Spec.NFS.Path,
host: host,
config: volumeConfig,
timeout: volume.CalculateTimeoutForVolume(volumeConfig.RecyclerMinimumTimeout, volumeConfig.RecyclerTimeoutIncrement, spec.PersistentVolume),
pvName: pvName,
eventRecorder: eventRecorder,
}, nil
}

@@ -298,7 +299,8 @@ type nfsRecycler struct {
config volume.VolumeConfig
timeout int64
volume.MetricsNil
pvName string
pvName string
eventRecorder volume.RecycleEventRecorder
}

func (r *nfsRecycler) GetPath() string {

@@ -318,7 +320,7 @@ func (r *nfsRecycler) Recycle() error {
Path: r.path,
},
}
return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient())
return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}

func getVolumeSource(spec *volume.Spec) (*api.NFSVolumeSource, bool, error) {
@@ -124,9 +124,12 @@ type PersistentVolumePlugin interface {
// again to new claims
type RecyclableVolumePlugin interface {
VolumePlugin
// NewRecycler creates a new volume.Recycler which knows how to reclaim
// this resource after the volume's release from a PersistentVolumeClaim
NewRecycler(pvName string, spec *Spec) (Recycler, error)
// NewRecycler creates a new volume.Recycler which knows how to reclaim this
// resource after the volume's release from a PersistentVolumeClaim. The
// recycler will use the provided recorder to write any events that might be
// interesting to user. It's expected that caller will pass these events to
// the PV being recycled.
NewRecycler(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) (Recycler, error)
}

// DeletableVolumePlugin is an extended interface of VolumePlugin and is used
@@ -19,13 +19,10 @@ package volume
import (
"fmt"
"reflect"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"

"hash/fnv"

@@ -39,6 +36,8 @@ import (
"k8s.io/kubernetes/pkg/util/sets"
)

type RecycleEventRecorder func(eventtype, message string)

// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,

@@ -52,8 +51,8 @@
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, kubeClient clientset.Interface) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient))
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}

// same as above func comments, except 'recyclerClient' is a narrower pod API

@@ -67,34 +66,61 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""

stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}

// Start the pod
_, err := recyclerClient.CreatePod(pod)
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
glog.V(5).Infof("old recycler pod %q found for volume", pod.Name)
} else {
return fmt.Errorf("Unexpected error creating recycler pod: %+v\n", err)
return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
}
}
defer recyclerClient.DeletePod(pod.Name, pod.Namespace)

// Now only the old pod or the new pod run. Watch it until it finishes.
stopChannel := make(chan struct{})
defer close(stopChannel)
nextPod := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)

// Now only the old pod or the new pod run. Watch it until it finishes
// and send all events on the pod to the PV
for {
watchedPod := nextPod()
if watchedPod.Status.Phase == api.PodSucceeded {
// volume.Recycle() returns nil on success, else error
return nil
}
if watchedPod.Status.Phase == api.PodFailed {
// volume.Recycle() returns nil on success, else error
if watchedPod.Status.Message != "" {
return fmt.Errorf(watchedPod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown.")
event := <-podCh
switch event.Object.(type) {
case *api.Pod:
// POD changed
pod := event.Object.(*api.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == api.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == api.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown.")
}
}

case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")

case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}

case *api.Event:
// Event received
podEvent := event.Object.(*api.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}

@@ -106,15 +132,24 @@ type recyclerClient interface {
CreatePod(pod *api.Pod) (*api.Pod, error)
GetPod(name, namespace string) (*api.Pod, error)
DeletePod(name, namespace string) error
WatchPod(name, namespace string, stopChannel chan struct{}) func() *api.Pod
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// derring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}

func newRecyclerClient(client clientset.Interface) recyclerClient {
return &realRecyclerClient{client}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}

type realRecyclerClient struct {
client clientset.Interface
client clientset.Interface
recorder RecycleEventRecorder
}

func (c *realRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {

@@ -129,28 +164,60 @@ func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.Core().Pods(namespace).Delete(name, nil)
}

// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// derring a close on the channel to stop the reflector.
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) func() *api.Pod {
fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}

podLW := &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
return c.client.Core().Pods(namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
return c.client.Core().Pods(namespace).Watch(options)
},
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, _ := fields.ParseSelector("metadata.name=" + name)
options := api.ListOptions{
FieldSelector: podSelector,
Watch: true,
}
queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
cache.NewReflector(podLW, &api.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

return func() *api.Pod {
return cache.Pop(queue).(*api.Pod)
podWatch, err := c.client.Core().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}

eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.Core().Events(namespace).Watch(api.ListOptions{
FieldSelector: eventSelector,
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}

eventCh := make(chan watch.Event, 0)

go func() {
defer eventWatch.Stop()
defer podWatch.Stop()
defer close(eventCh)

for {
select {
case _ = <-stopChannel:
return

case podEvent, ok := <-podWatch.ResultChan():
if !ok {
return
}
eventCh <- podEvent

case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
return
}
eventCh <- eventEvent
}
}
}()

return eventCh, nil
}

// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
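The rewritten WatchPod opens two API watches, one on the recycler pod and one on its events, and merges both result channels into a single <-chan watch.Event that closes when the stop channel fires. The select-based fan-in can be sketched with plain channels and no Kubernetes types:

```go
package main

import "fmt"

// merge forwards values from two sources into one output channel until the
// stop channel is closed or either source is exhausted, mirroring the
// goroutine inside the new WatchPod.
func merge(stop <-chan struct{}, podCh, eventCh <-chan string) <-chan string {
	out := make(chan string)
	go func() {
		defer close(out)
		for {
			select {
			case <-stop:
				return
			case v, ok := <-podCh:
				if !ok {
					return
				}
				out <- v
			case v, ok := <-eventCh:
				if !ok {
					return
				}
				out <- v
			}
		}
	}()
	return out
}

func main() {
	stop := make(chan struct{})
	podCh := make(chan string, 2)
	eventCh := make(chan string, 1)

	podCh <- "pod: Running"
	eventCh <- "event: Pulled image"
	podCh <- "pod: Succeeded"

	out := merge(stop, podCh, eventCh)
	for i := 0; i < 3; i++ {
		fmt.Println(<-out)
	}
	close(stop) // the caller is responsible for stopping the watch
}
```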
6 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go generated vendored

@@ -19,6 +19,7 @@ package autoprovision
import (
"io"

"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

"fmt"

@@ -26,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)

func init() {

@@ -42,7 +42,7 @@ func init() {
type provision struct {
*admission.Handler
client clientset.Interface
namespaceInformer framework.SharedIndexInformer
namespaceInformer cache.SharedIndexInformer
}

var _ = admission.WantsInformerFactory(&provision{})
@@ -19,6 +19,7 @@ package exists
import (
"io"

"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

"fmt"

@@ -26,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)

func init() {

@@ -42,7 +42,7 @@ func init() {
type exists struct {
*admission.Handler
client clientset.Interface
namespaceInformer framework.SharedIndexInformer
namespaceInformer cache.SharedIndexInformer
}

var _ = admission.WantsInformerFactory(&exists{})
@@ -23,9 +23,9 @@ import (

lru "github.com/hashicorp/golang-lru"

"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"

@@ -52,7 +52,7 @@ type lifecycle struct {
*admission.Handler
client clientset.Interface
immortalNamespaces sets.String
namespaceInformer framework.SharedIndexInformer
namespaceInformer cache.SharedIndexInformer
// forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache.
// if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server.
forceLiveLookupCache *lru.Cache
@@ -18,6 +18,7 @@ package predicates

import (
"fmt"

"k8s.io/kubernetes/pkg/api"
)

2 vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go generated vendored
@@ -968,7 +968,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods
if err != nil {
return nil, err
}
if affinity.PodAntiAffinity != nil {
if affinity != nil && affinity.PodAntiAffinity != nil {
existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
if err != nil {
return nil, err
@@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/runtime"

@@ -77,8 +76,8 @@ type ConfigFactory struct {
// Close this to stop all reflectors
StopEverything chan struct{}

scheduledPodPopulator *framework.Controller
nodePopulator *framework.Controller
scheduledPodPopulator *cache.Controller
nodePopulator *cache.Controller

schedulerCache schedulercache.Cache

@@ -125,11 +124,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
// We construct this here instead of in CreateFromKeys because
// ScheduledPodLister is something we provide to plug in functions that
// they may need to call.
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer(
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer(
c.createAssignedNonTerminatedPodLW(),
&api.Pod{},
0,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: c.addPodToCache,
UpdateFunc: c.updatePodInCache,
DeleteFunc: c.deletePodFromCache,

@@ -137,11 +136,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)

c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
c.NodeLister.Store, c.nodePopulator = cache.NewInformer(
c.createNodeLW(),
&api.Node{},
0,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: c.addNodeToCache,
UpdateFunc: c.updateNodeInCache,
DeleteFunc: c.deleteNodeFromCache,
@@ -244,12 +244,12 @@ func (cache *schedulerCache) RemovePod(pod *api.Pod) error {
cache.mu.Lock()
defer cache.mu.Unlock()

_, ok := cache.podStates[key]
cachedstate, ok := cache.podStates[key]
switch {
// An assumed pod won't have Delete/Remove event. It needs to have Add event
// before Remove event, in which case the state would change from Assumed to Added.
case ok && !cache.assumedPods[key]:
err := cache.removePod(pod)
err := cache.removePod(cachedstate.pod)
if err != nil {
return err
}
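The RemovePod fix looks up the stored entry and removes cachedstate.pod instead of the pod passed by the caller, so the cache undoes its accounting against the object it actually added even if the caller's copy has drifted. A compact sketch of keying removal off the cached entry (the cache below is a toy):

```go
package main

import "fmt"

type pod struct {
	name string
	node string
}

type podCache struct {
	// podStates remembers each pod exactly as it was added, including which
	// node the cache accounted it against.
	podStates map[string]pod
	nodeCount map[string]int
}

func (c *podCache) add(p pod) {
	c.podStates[p.name] = p
	c.nodeCount[p.node]++
}

// remove uses the cached copy, as in cache.removePod(cachedstate.pod), so the
// accounting is undone against the node recorded at add time even if the
// caller passes a pod that claims a different node.
func (c *podCache) remove(p pod) {
	cached, ok := c.podStates[p.name]
	if !ok {
		return
	}
	c.nodeCount[cached.node]--
	delete(c.podStates, p.name)
}

func main() {
	c := &podCache{podStates: map[string]pod{}, nodeCount: map[string]int{}}
	c.add(pod{name: "web-1", node: "node-a"})

	// A stale caller still thinks the pod is on node-b.
	c.remove(pod{name: "web-1", node: "node-b"})
	fmt.Println(c.nodeCount) // map[node-a:0], no stray count against node-b
}
```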