Merge pull request #192 from keel-hq/feature/stateful_set

Feature/stateful set
pull/201/head 0.9.0-rc1
Karolis Rusenas 2018-04-20 21:09:17 +01:00 committed by GitHub
commit 689c921fea
123 changed files with 11328 additions and 3120 deletions

Gopkg.lock (generated; 25 changed lines)

@@ -67,6 +67,12 @@
 ]
 revision = "835dc879394a24080bf1c01e199c4cda001b6c46"
+[[projects]]
+name = "github.com/davecgh/go-spew"
+packages = ["spew"]
+revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+version = "v1.1.0"
 [[projects]]
 name = "github.com/docker/distribution"
 packages = [
@@ -223,6 +229,15 @@
 ]
 revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
+[[projects]]
+branch = "master"
+name = "github.com/hashicorp/golang-lru"
+packages = [
+".",
+"simplelru"
+]
+revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
 [[projects]]
 branch = "master"
 name = "github.com/howeyc/gopass"
@@ -588,6 +603,7 @@
 "pkg/api/errors",
 "pkg/api/meta",
 "pkg/api/resource",
+"pkg/apis/meta/internalversion",
 "pkg/apis/meta/v1",
 "pkg/apis/meta/v1/unstructured",
 "pkg/apis/meta/v1alpha1",
@@ -605,7 +621,9 @@
 "pkg/runtime/serializer/versioning",
 "pkg/selection",
 "pkg/types",
+"pkg/util/cache",
 "pkg/util/clock",
+"pkg/util/diff",
 "pkg/util/errors",
 "pkg/util/framer",
 "pkg/util/intstr",
@@ -662,19 +680,22 @@
 "rest",
 "rest/watch",
 "tools/auth",
+"tools/cache",
 "tools/clientcmd",
 "tools/clientcmd/api",
 "tools/clientcmd/api/latest",
 "tools/clientcmd/api/v1",
 "tools/metrics",
+"tools/pager",
 "tools/reference",
 "transport",
+"util/buffer",
 "util/cert",
 "util/flowcontrol",
 "util/homedir",
 "util/integer"
 ]
-revision = "9389c055a838d4f208b699b3c7c51b70f2368861"
+revision = "90539b4e75a8daaf7f67c3874c6180bfb1a63936"
 [[projects]]
 name = "k8s.io/helm"
@@ -702,6 +723,6 @@
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "646011bec8403e16f31fcfb402c9713d07cc23f06083d28c9e6f8afbd0be92a8"
+inputs-digest = "0f1c009db7cc6283ff5ab258f8e65fcbc1e0c6f3448ddb47087f9d07f33bb65f"
 solver-name = "gps-cdcl"
 solver-version = 1


@@ -7,7 +7,7 @@ import (
 "github.com/keel-hq/keel/bot/formatter"
 "github.com/keel-hq/keel/provider/kubernetes"
-"k8s.io/api/extensions/v1beta1"
+apps_v1 "k8s.io/api/apps/v1"
 log "github.com/sirupsen/logrus"
 )
@@ -19,8 +19,8 @@ type Filter struct {
 }
 // deployments - gets all deployments
-func deployments(k8sImplementer kubernetes.Implementer) ([]v1beta1.Deployment, error) {
-deploymentLists := []*v1beta1.DeploymentList{}
+func deployments(k8sImplementer kubernetes.Implementer) ([]apps_v1.Deployment, error) {
+deploymentLists := []*apps_v1.DeploymentList{}
 n, err := k8sImplementer.Namespaces()
 if err != nil {
@@ -39,7 +39,7 @@ func deployments(k8sImplementer kubernetes.Implementer) ([]v1beta1.Deployment, e
 deploymentLists = append(deploymentLists, l)
 }
-impacted := []v1beta1.Deployment{}
+impacted := []apps_v1.Deployment{}
 for _, deploymentList := range deploymentLists {
 for _, deployment := range deploymentList.Items {
@@ -71,7 +71,7 @@ func DeploymentsResponse(filter Filter, k8sImplementer kubernetes.Implementer) s
 return buf.String()
 }
-func convertToInternal(deployments []v1beta1.Deployment) []formatter.Deployment {
+func convertToInternal(deployments []apps_v1.Deployment) []formatter.Deployment {
 formatted := []formatter.Deployment{}
 for _, d := range deployments {
@@ -86,7 +86,7 @@ func convertToInternal(deployments []v1beta1.Deployment) []formatter.Deployment
 return formatted
 }
-func getImages(deployment *v1beta1.Deployment) []string {
+func getImages(deployment *apps_v1.Deployment) []string {
 var images []string
 for _, c := range deployment.Spec.Template.Spec.Containers {
 images = append(images, c.Image)


@@ -17,6 +17,8 @@ import (
 "github.com/keel-hq/keel/constants"
 "github.com/keel-hq/keel/extension/notification"
+"github.com/keel-hq/keel/internal/k8s"
+"github.com/keel-hq/keel/internal/workgroup"
 "github.com/keel-hq/keel/provider"
 "github.com/keel-hq/keel/provider/helm"
 "github.com/keel-hq/keel/provider/kubernetes"
@@ -133,6 +135,18 @@ func main() {
 }).Fatal("main: failed to create kubernetes implementer")
 }
+var g workgroup.Group
+t := &k8s.Translator{
+FieldLogger: log.WithField("context", "translator"),
+}
+buf := k8s.NewBuffer(&g, t, log.StandardLogger(), 128)
+wl := log.WithField("context", "watch")
+k8s.WatchDeployments(&g, implementer.Client(), wl, buf)
+k8s.WatchStatefulSets(&g, implementer.Client(), wl, buf)
+k8s.WatchDaemonSets(&g, implementer.Client(), wl, buf)
 keelsNamespace := constants.DefaultNamespace
 if os.Getenv(EnvNamespace) != "" {
 keelsNamespace = os.Getenv(EnvNamespace)
@@ -147,16 +161,13 @@
 }
 serializer := codecs.DefaultSerializer()
-// mem := memory.NewMemoryCache(24*time.Hour, 24*time.Hour, 1*time.Minute)
 approvalsManager := approvals.New(kkv, serializer)
 go approvalsManager.StartExpiryService(ctx)
 // setting up providers
-providers := setupProviders(implementer, sender, approvalsManager)
+providers := setupProviders(implementer, sender, approvalsManager, &t.GenericResourceCache)
 secretsGetter := secrets.NewGetter(implementer)
 teardownTriggers := setupTriggers(ctx, providers, secretsGetter, approvalsManager)
 bot.Run(implementer, approvalsManager)
@@ -164,40 +175,38 @@ func main() {
 signalChan := make(chan os.Signal, 1)
 cleanupDone := make(chan bool)
 signal.Notify(signalChan, os.Interrupt)
-go func() {
-for _ = range signalChan {
-log.Info("received an interrupt, closing connection...")
+g.Add(func(stop <-chan struct{}) {
+go func() {
+for range signalChan {
+log.Info("received an interrupt, shutting down...")
 go func() {
 select {
 case <-time.After(10 * time.Second):
 log.Info("connection shutdown took too long, exiting... ")
 close(cleanupDone)
 return
 case <-cleanupDone:
 return
 }
 }()
-// teardownProviders()
 providers.Stop()
 teardownTriggers()
 bot.Stop()
 cleanupDone <- true
 }
 }()
 <-cleanupDone
+})
+g.Run()
 }
 // setupProviders - setting up available providers. New providers should be initialised here and added to
 // provider map
-func setupProviders(k8sImplementer kubernetes.Implementer, sender notification.Sender, approvalsManager approvals.Manager) (providers provider.Providers) {
+func setupProviders(k8sImplementer kubernetes.Implementer, sender notification.Sender, approvalsManager approvals.Manager, grc *k8s.GenericResourceCache) (providers provider.Providers) {
 var enabledProviders []provider.Provider
-k8sProvider, err := kubernetes.NewProvider(k8sImplementer, sender, approvalsManager)
+k8sProvider, err := kubernetes.NewProvider(k8sImplementer, sender, approvalsManager, grc)
 if err != nil {
 log.WithFields(log.Fields{
 "error": err,

internal/k8s/cache.go (new file, 112 lines)

@ -0,0 +1,112 @@
package k8s
import (
"sort"
"sync"
)
type genericResourceCache struct {
sync.Mutex
values []*GenericResource
}
// GenericResourceCache - storage for generic resources with a rendezvous point for goroutines
// waiting for or announcing the occurrence of cache events.
type GenericResourceCache struct {
genericResourceCache
Cond
}
// Values returns a copy of the contents of the cache.
func (cc *genericResourceCache) Values() []*GenericResource {
cc.Lock()
r := append([]*GenericResource{}, cc.values...)
cc.Unlock()
return r
}
// Add adds an entry to the cache. If a GenericResource with the same
// name exists, it is replaced.
func (cc *genericResourceCache) Add(grs ...*GenericResource) {
if len(grs) == 0 {
return
}
cc.Lock()
sort.Sort(genericResource(cc.values))
for _, gr := range grs {
cc.add(gr)
}
cc.Unlock()
}
// add adds c to the cache. If c is already present, the cached value of c is overwritten.
// invariant: cc.values should be sorted on entry.
func (cc *genericResourceCache) add(c *GenericResource) {
i := sort.Search(len(cc.values), func(i int) bool { return cc.values[i].Identifier >= c.Identifier })
if i < len(cc.values) && cc.values[i].Identifier == c.Identifier {
// c is already present, replace
cc.values[i] = c
} else {
// c is not present, append
cc.values = append(cc.values, c)
// re-sort to convert the append into an insert
sort.Sort(genericResource(cc.values))
}
}
// Remove removes the named entry from the cache. If the entry
// is not present in the cache, the operation is a no-op.
func (cc *genericResourceCache) Remove(identifiers ...string) {
if len(identifiers) == 0 {
return
}
cc.Lock()
sort.Sort(genericResource(cc.values))
for _, n := range identifiers {
cc.remove(n)
}
cc.Unlock()
}
// remove removes the named entry from the cache.
// invariant: cc.values should be sorted on entry.
func (cc *genericResourceCache) remove(identifier string) {
i := sort.Search(len(cc.values), func(i int) bool { return cc.values[i].Identifier >= identifier })
if i < len(cc.values) && cc.values[i].Identifier == identifier {
// c is present, remove
cc.values = append(cc.values[:i], cc.values[i+1:]...)
}
}
// Cond implements a condition variable, a rendezvous point for goroutines
// waiting for or announcing the occurrence of an event.
type Cond struct {
mu sync.Mutex
waiters []chan int
last int
}
// Register registers ch to receive a value when Notify is called.
func (c *Cond) Register(ch chan int, last int) {
c.mu.Lock()
defer c.mu.Unlock()
if last < c.last {
// notify this channel immediately
ch <- c.last
return
}
c.waiters = append(c.waiters, ch)
}
// Notify notifies all registered waiters that an event has occurred.
func (c *Cond) Notify() {
c.mu.Lock()
defer c.mu.Unlock()
c.last++
for _, ch := range c.waiters {
ch <- c.last
}
c.waiters = c.waiters[:0]
}
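Taken together, the cache and the condition variable form a simple publish/subscribe pair: writers Add or Remove resources and then call Notify, while readers Register a buffered channel and re-read Values each time a new generation number arrives. A minimal sketch of that flow, assuming it lives inside the keel module (the internal/ path is not importable from other modules); the waitForUpdates helper and resource names are purely illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/keel-hq/keel/internal/k8s"
	apps_v1 "k8s.io/api/apps/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// waitForUpdates re-registers for cache notifications and dumps the cached
// resources each time a new generation is announced via Notify.
func waitForUpdates(grc *k8s.GenericResourceCache) {
	ch := make(chan int, 1) // buffered so Notify never blocks on us
	last := 0
	for {
		grc.Register(ch, last) // replies immediately if the cache already moved past `last`
		last = <-ch
		for _, gr := range grc.Values() {
			fmt.Println(gr.String())
		}
	}
}

func main() {
	grc := &k8s.GenericResourceCache{}
	go waitForUpdates(grc)

	dep := &apps_v1.Deployment{
		ObjectMeta: meta_v1.ObjectMeta{Name: "wd-1", Namespace: "default"},
	}
	gr, err := k8s.NewGenericResource(dep)
	if err != nil {
		panic(err)
	}
	grc.Add(gr)  // insert (or replace) under identifier "deployment/default/wd-1"
	grc.Notify() // wake up registered waiters

	time.Sleep(100 * time.Millisecond) // give the consumer a moment before exiting
}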

internal/k8s/converter.go (new file, 52 lines)

@ -0,0 +1,52 @@
package k8s
import (
apps_v1 "k8s.io/api/apps/v1"
core_v1 "k8s.io/api/core/v1"
)
func getContainerImages(containers []core_v1.Container) []string {
var images []string
for _, c := range containers {
images = append(images, c.Image)
}
return images
}
func getImagePullSecrets(imagePullSecrets []core_v1.LocalObjectReference) []string {
var secrets []string
for _, s := range imagePullSecrets {
secrets = append(secrets, s.Name)
}
return secrets
}
// deployments
func getDeploymentIdentifier(d *apps_v1.Deployment) string {
return "deployment/" + d.Namespace + "/" + d.Name
}
func updateDeploymentContainer(d *apps_v1.Deployment, index int, image string) {
d.Spec.Template.Spec.Containers[index].Image = image
}
// stateful sets https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/
func getStatefulSetIdentifier(ss *apps_v1.StatefulSet) string {
return "statefulset/" + ss.Namespace + "/" + ss.Name
}
func updateStatefulSetContainer(ss *apps_v1.StatefulSet, index int, image string) {
ss.Spec.Template.Spec.Containers[index].Image = image
}
// daemonsets
func getDaemonsetSetIdentifier(s *apps_v1.DaemonSet) string {
return "daemonset/" + s.Namespace + "/" + s.Name
}
func updateDaemonsetSetContainer(s *apps_v1.DaemonSet, index int, image string) {
s.Spec.Template.Spec.Containers[index].Image = image
}
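These helpers build the "kind/namespace/name" identifiers that the generic resource cache sorts and binary-searches on. Since they are unexported, the following illustrative check is written as a package-internal test; the test name is not part of this change:

package k8s

import (
	"testing"

	apps_v1 "k8s.io/api/apps/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Illustrative only: identifiers follow the "<kind>/<namespace>/<name>" scheme
// used as the cache sort key.
func TestIdentifierScheme(t *testing.T) {
	ss := &apps_v1.StatefulSet{
		ObjectMeta: meta_v1.ObjectMeta{Namespace: "default", Name: "redis"},
	}
	if got := getStatefulSetIdentifier(ss); got != "statefulset/default/redis" {
		t.Errorf("unexpected identifier: %s", got)
	}
}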

internal/k8s/resource.go (new file, 251 lines)

@ -0,0 +1,251 @@
package k8s
import (
"fmt"
"reflect"
"strings"
apps_v1 "k8s.io/api/apps/v1"
core_v1 "k8s.io/api/core/v1"
)
// GenericResource - generic resource,
// used to work with multiple kinds of k8s resources
type GenericResource struct {
// original resource
obj interface{}
Identifier string
Namespace string
Name string
}
type genericResource []*GenericResource
func (c genericResource) Len() int {
return len(c)
}
func (c genericResource) Swap(i, j int) {
c[i], c[j] = c[j], c[i]
}
func (c genericResource) Less(i, j int) bool {
return c[i].Identifier < c[j].Identifier
}
// NewGenericResource - create new generic k8s resource
func NewGenericResource(obj interface{}) (*GenericResource, error) {
switch obj.(type) {
case *apps_v1.Deployment, *apps_v1.StatefulSet, *apps_v1.DaemonSet:
// ok
default:
return nil, fmt.Errorf("unsupported resource type: %v", reflect.TypeOf(obj).Kind())
}
gr := &GenericResource{
obj: obj,
}
gr.Identifier = gr.GetIdentifier()
gr.Namespace = gr.GetNamespace()
gr.Name = gr.GetName()
return gr, nil
}
func (r *GenericResource) String() string {
return fmt.Sprintf("%s/%s/%s images: %s", r.Kind(), r.Namespace, r.Name, strings.Join(r.GetImages(), ", "))
}
// GetIdentifier returns resource identifier
func (r *GenericResource) GetIdentifier() string {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return getDeploymentIdentifier(obj)
case *apps_v1.StatefulSet:
return getStatefulSetIdentifier(obj)
case *apps_v1.DaemonSet:
return getDaemonsetSetIdentifier(obj)
}
return ""
}
// GetName returns resource name
func (r *GenericResource) GetName() string {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return obj.GetName()
case *apps_v1.StatefulSet:
return obj.GetName()
case *apps_v1.DaemonSet:
return obj.GetName()
}
return ""
}
// GetNamespace returns resource namespace
func (r *GenericResource) GetNamespace() string {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return obj.GetNamespace()
case *apps_v1.StatefulSet:
return obj.GetNamespace()
case *apps_v1.DaemonSet:
return obj.GetNamespace()
}
return ""
}
// Kind returns a type of resource that this structure represents
func (r *GenericResource) Kind() string {
switch r.obj.(type) {
case *apps_v1.Deployment:
return "deployment"
case *apps_v1.StatefulSet:
return "statefulset"
case *apps_v1.DaemonSet:
return "daemonset"
}
return ""
}
// GetResource - get resource
func (r *GenericResource) GetResource() interface{} {
return r.obj
}
// GetLabels - get resource labels
func (r *GenericResource) GetLabels() (labels map[string]string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return obj.GetLabels()
case *apps_v1.StatefulSet:
return obj.GetLabels()
case *apps_v1.DaemonSet:
return obj.GetLabels()
}
return
}
// SetLabels - set resource labels
func (r *GenericResource) SetLabels(labels map[string]string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
obj.SetLabels(labels)
case *apps_v1.StatefulSet:
obj.SetLabels(labels)
case *apps_v1.DaemonSet:
obj.SetLabels(labels)
}
return
}
// GetSpecAnnotations - get resource spec template annotations
func (r *GenericResource) GetSpecAnnotations() (annotations map[string]string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
a := obj.Spec.Template.GetAnnotations()
if a == nil {
return make(map[string]string)
}
return a
case *apps_v1.StatefulSet:
return obj.Spec.Template.GetAnnotations()
case *apps_v1.DaemonSet:
return obj.Spec.Template.GetAnnotations()
}
return
}
// SetSpecAnnotations - set resource spec template annotations
func (r *GenericResource) SetSpecAnnotations(annotations map[string]string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
obj.Spec.Template.SetAnnotations(annotations)
case *apps_v1.StatefulSet:
obj.Spec.Template.SetAnnotations(annotations)
case *apps_v1.DaemonSet:
obj.Spec.Template.SetAnnotations(annotations)
}
return
}
// GetAnnotations - get resource annotations
func (r *GenericResource) GetAnnotations() (annotations map[string]string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return obj.GetAnnotations()
case *apps_v1.StatefulSet:
return obj.GetAnnotations()
case *apps_v1.DaemonSet:
return obj.GetAnnotations()
}
return
}
// SetAnnotations - set resource annotations
func (r *GenericResource) SetAnnotations(annotations map[string]string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
obj.SetAnnotations(annotations)
case *apps_v1.StatefulSet:
obj.SetAnnotations(annotations)
case *apps_v1.DaemonSet:
obj.SetAnnotations(annotations)
}
return
}
// GetImagePullSecrets - returns secrets from pod spec
func (r *GenericResource) GetImagePullSecrets() (secrets []string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return getImagePullSecrets(obj.Spec.Template.Spec.ImagePullSecrets)
case *apps_v1.StatefulSet:
return getImagePullSecrets(obj.Spec.Template.Spec.ImagePullSecrets)
case *apps_v1.DaemonSet:
return getImagePullSecrets(obj.Spec.Template.Spec.ImagePullSecrets)
}
return
}
// GetImages - returns images used by this resource
func (r *GenericResource) GetImages() (images []string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return getContainerImages(obj.Spec.Template.Spec.Containers)
case *apps_v1.StatefulSet:
return getContainerImages(obj.Spec.Template.Spec.Containers)
case *apps_v1.DaemonSet:
return getContainerImages(obj.Spec.Template.Spec.Containers)
}
return
}
// Containers - returns containers managed by this resource
func (r *GenericResource) Containers() (containers []core_v1.Container) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
return obj.Spec.Template.Spec.Containers
case *apps_v1.StatefulSet:
return obj.Spec.Template.Spec.Containers
case *apps_v1.DaemonSet:
return obj.Spec.Template.Spec.Containers
}
return
}
// UpdateContainer - updates container image
func (r *GenericResource) UpdateContainer(index int, image string) {
switch obj := r.obj.(type) {
case *apps_v1.Deployment:
updateDeploymentContainer(obj, index, image)
case *apps_v1.StatefulSet:
updateStatefulSetContainer(obj, index, image)
case *apps_v1.DaemonSet:
updateDaemonsetSetContainer(obj, index, image)
}
return
}
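For reference, a small sketch of how a GenericResource is typically consumed: images and annotations are read and written through the wrapper, and the mutated object is later unwrapped via GetResource or handed to the implementer for the type switch to dispatch on. The deployment and image names below are illustrative only, and the sketch assumes it lives inside the keel module:

package main

import (
	"fmt"

	"github.com/keel-hq/keel/internal/k8s"
	apps_v1 "k8s.io/api/apps/v1"
	core_v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	dep := &apps_v1.Deployment{
		ObjectMeta: meta_v1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: apps_v1.DeploymentSpec{
			Template: core_v1.PodTemplateSpec{
				Spec: core_v1.PodSpec{
					Containers: []core_v1.Container{{Image: "example.com/web:1.0.0"}},
				},
			},
		},
	}

	gr, err := k8s.NewGenericResource(dep)
	if err != nil {
		panic(err)
	}

	fmt.Println(gr.GetImages()) // [example.com/web:1.0.0]

	// record a change cause the same way the provider does
	ann := gr.GetAnnotations()
	if ann == nil {
		ann = map[string]string{}
	}
	ann["kubernetes.io/change-cause"] = "keel automated update, version 1.0.0 -> 1.0.1"
	gr.SetAnnotations(ann)

	gr.UpdateContainer(0, "example.com/web:1.0.1")
	fmt.Println(gr.String()) // deployment/default/web images: example.com/web:1.0.1
}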


@ -0,0 +1,187 @@
package k8s
import (
"testing"
apps_v1 "k8s.io/api/apps/v1"
core_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestDeployment(t *testing.T) {
d := &apps_v1.Deployment{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "dep-1",
Namespace: "xxxx",
Annotations: map[string]string{},
Labels: map[string]string{},
},
apps_v1.DeploymentSpec{
Template: core_v1.PodTemplateSpec{
Spec: core_v1.PodSpec{
Containers: []core_v1.Container{
{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
},
},
},
},
apps_v1.DeploymentStatus{},
}
gr, err := NewGenericResource(d)
if err != nil {
t.Fatalf("failed to create generic resource: %s", err)
}
gr.UpdateContainer(0, "hey/there")
updated, ok := gr.GetResource().(*apps_v1.Deployment)
if !ok {
t.Fatalf("conversion failed")
}
if updated.Spec.Template.Spec.Containers[0].Image != "hey/there" {
t.Errorf("unexpected image: %s", updated.Spec.Template.Spec.Containers[0].Image)
}
}
func TestDeploymentMultipleContainers(t *testing.T) {
d := &apps_v1.Deployment{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "dep-1",
Namespace: "xxxx",
Annotations: map[string]string{},
Labels: map[string]string{},
},
apps_v1.DeploymentSpec{
Template: core_v1.PodTemplateSpec{
Spec: core_v1.PodSpec{
Containers: []core_v1.Container{
{
Image: "gcr.io/v2-namespace/hi-world:1.1.1",
},
{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
{
Image: "gcr.io/v2-namespace/bye-world:1.1.1",
},
},
},
},
},
apps_v1.DeploymentStatus{},
}
gr, err := NewGenericResource(d)
if err != nil {
t.Fatalf("failed to create generic resource: %s", err)
}
gr.UpdateContainer(1, "hey/there")
updated, ok := gr.GetResource().(*apps_v1.Deployment)
if !ok {
t.Fatalf("conversion failed")
}
if updated.Spec.Template.Spec.Containers[1].Image != "hey/there" {
t.Errorf("unexpected image: %s", updated.Spec.Template.Spec.Containers[0].Image)
}
}
func TestStatefulSetMultipleContainers(t *testing.T) {
d := &apps_v1.StatefulSet{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "dep-1",
Namespace: "xxxx",
Annotations: map[string]string{},
Labels: map[string]string{},
},
apps_v1.StatefulSetSpec{
Template: core_v1.PodTemplateSpec{
Spec: core_v1.PodSpec{
Containers: []core_v1.Container{
{
Image: "gcr.io/v2-namespace/hi-world:1.1.1",
},
{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
{
Image: "gcr.io/v2-namespace/bye-world:1.1.1",
},
},
},
},
},
apps_v1.StatefulSetStatus{},
}
gr, err := NewGenericResource(d)
if err != nil {
t.Fatalf("failed to create generic resource: %s", err)
}
gr.UpdateContainer(1, "hey/there")
updated, ok := gr.GetResource().(*apps_v1.StatefulSet)
if !ok {
t.Fatalf("conversion failed")
}
if updated.Spec.Template.Spec.Containers[1].Image != "hey/there" {
t.Errorf("unexpected image: %s", updated.Spec.Template.Spec.Containers[0].Image)
}
}
func TestDaemonsetlSetMultipleContainers(t *testing.T) {
d := &apps_v1.DaemonSet{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "dep-1",
Namespace: "xxxx",
Annotations: map[string]string{},
Labels: map[string]string{},
},
apps_v1.DaemonSetSpec{
Template: core_v1.PodTemplateSpec{
Spec: core_v1.PodSpec{
Containers: []core_v1.Container{
{
Image: "gcr.io/v2-namespace/hi-world:1.1.1",
},
{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
{
Image: "gcr.io/v2-namespace/bye-world:1.1.1",
},
},
},
},
},
apps_v1.DaemonSetStatus{},
}
gr, err := NewGenericResource(d)
if err != nil {
t.Fatalf("failed to create generic resource: %s", err)
}
gr.UpdateContainer(1, "hey/there")
updated, ok := gr.GetResource().(*apps_v1.DaemonSet)
if !ok {
t.Fatalf("conversion failed")
}
if updated.Spec.Template.Spec.Containers[1].Image != "hey/there" {
t.Errorf("unexpected image: %s", updated.Spec.Template.Spec.Containers[0].Image)
}
}


@ -0,0 +1,40 @@
package k8s
import (
"github.com/sirupsen/logrus"
)
type Translator struct {
logrus.FieldLogger
GenericResourceCache
KeelSelector string
}
func (t *Translator) OnAdd(obj interface{}) {
gr, err := NewGenericResource(obj)
if err != nil {
t.Errorf("OnAdd failed to add resource %T: %#v", obj, obj)
return
}
t.GenericResourceCache.Add(gr)
}
func (t *Translator) OnUpdate(oldObj, newObj interface{}) {
gr, err := NewGenericResource(newObj)
if err != nil {
t.Errorf("OnUpdate failed to update resource %T: %#v", newObj, newObj)
return
}
t.GenericResourceCache.Add(gr)
}
func (t *Translator) OnDelete(obj interface{}) {
gr, err := NewGenericResource(obj)
if err != nil {
t.Errorf("OnDelete failed to delete resource %T: %#v", obj, obj)
return
}
t.GenericResourceCache.Remove(gr.GetIdentifier())
}

internal/k8s/watcher.go (new file, 119 lines)

@ -0,0 +1,119 @@
package k8s
import (
"time"
"github.com/keel-hq/keel/internal/workgroup"
"github.com/sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
// WatchDeployments creates a SharedInformer for apps/v1.Deployments and registers it with g.
func WatchDeployments(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
watch(g, client.AppsV1().RESTClient(), log, "deployments", new(apps_v1.Deployment), rs...)
}
// WatchStatefulSets creates a SharedInformer for apps/v1.StatefulSet and registers it with g.
func WatchStatefulSets(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
watch(g, client.AppsV1().RESTClient(), log, "statefulsets", new(apps_v1.StatefulSet), rs...)
}
// WatchDaemonSets creates a SharedInformer for apps/v1.DaemonSet and registers it with g.
func WatchDaemonSets(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
watch(g, client.AppsV1().RESTClient(), log, "daemonsets", new(apps_v1.DaemonSet), rs...)
}
func watch(g *workgroup.Group, c cache.Getter, log logrus.FieldLogger, resource string, objType runtime.Object, rs ...cache.ResourceEventHandler) {
lw := cache.NewListWatchFromClient(c, resource, v1.NamespaceAll, fields.Everything())
sw := cache.NewSharedInformer(lw, objType, 30*time.Minute)
for _, r := range rs {
sw.AddEventHandler(r)
}
g.Add(func(stop <-chan struct{}) {
log := log.WithField("resource", resource)
log.Println("started")
defer log.Println("stopped")
sw.Run(stop)
})
}
type buffer struct {
ev chan interface{}
logrus.StdLogger
rh cache.ResourceEventHandler
}
type addEvent struct {
obj interface{}
}
type updateEvent struct {
oldObj, newObj interface{}
}
type deleteEvent struct {
obj interface{}
}
// NewBuffer returns a ResourceEventHandler which buffers and serialises ResourceEventHandler events.
func NewBuffer(g *workgroup.Group, rh cache.ResourceEventHandler, log logrus.FieldLogger, size int) cache.ResourceEventHandler {
buf := &buffer{
ev: make(chan interface{}, size),
StdLogger: log.WithField("context", "buffer"),
rh: rh,
}
g.Add(buf.loop)
return buf
}
func (b *buffer) loop(stop <-chan struct{}) {
b.Println("started")
defer b.Println("stopped")
for {
select {
case ev := <-b.ev:
switch ev := ev.(type) {
case *addEvent:
b.rh.OnAdd(ev.obj)
case *updateEvent:
b.rh.OnUpdate(ev.oldObj, ev.newObj)
case *deleteEvent:
b.rh.OnDelete(ev.obj)
default:
b.Printf("unhandled event type: %T: %v", ev, ev)
}
case <-stop:
return
}
}
}
func (b *buffer) OnAdd(obj interface{}) {
b.send(&addEvent{obj})
}
func (b *buffer) OnUpdate(oldObj, newObj interface{}) {
b.send(&updateEvent{oldObj, newObj})
}
func (b *buffer) OnDelete(obj interface{}) {
b.send(&deleteEvent{obj})
}
func (b *buffer) send(ev interface{}) {
select {
case b.ev <- ev:
// all good
default:
b.Printf("event channel is full, len: %v, cap: %v", len(b.ev), cap(b.ev))
b.ev <- ev
}
}
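Wiring the watchers, buffer and translator together outside of this diff looks roughly as follows. This is a sketch that mirrors what cmd/keel/main.go does above; the kubeconfig-based client construction is an assumption for the example, and the code is presumed to live inside the keel module:

package main

import (
	"github.com/keel-hq/keel/internal/k8s"
	"github.com/keel-hq/keel/internal/workgroup"
	log "github.com/sirupsen/logrus"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// illustrative client construction from the local kubeconfig
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	var g workgroup.Group

	// Translator turns informer events into GenericResources and caches them
	t := &k8s.Translator{FieldLogger: log.WithField("context", "translator")}

	// the buffer decouples informer callbacks from the translator and
	// serialises them onto a single goroutine
	buf := k8s.NewBuffer(&g, t, log.StandardLogger(), 128)

	wl := log.WithField("context", "watch")
	k8s.WatchDeployments(&g, client, wl, buf)
	k8s.WatchStatefulSets(&g, client, wl, buf)
	k8s.WatchDaemonSets(&g, client, wl, buf)

	g.Run() // blocks until the first registered function returns
}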


@ -0,0 +1,36 @@
package workgroup
import "sync"
// Group manages a set of goroutines with related lifetimes.
type Group struct {
fn []func(<-chan struct{})
}
// Add adds a function to the Group. Must be called before Run.
func (g *Group) Add(fn func(<-chan struct{})) {
g.fn = append(g.fn, fn)
}
// Run executes each function registered with Add in its own goroutine.
// Run blocks until each function has returned.
// The first function to return triggers the closure of the channel
// passed to each function, which should, in turn, return.
func (g *Group) Run() {
var wg sync.WaitGroup
wg.Add(len(g.fn))
stop := make(chan struct{})
result := make(chan error, len(g.fn))
for _, fn := range g.fn {
go func(fn func(<-chan struct{})) {
defer wg.Done()
fn(stop)
result <- nil
}(fn)
}
<-result
close(stop)
wg.Wait()
}
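The semantics worth noting: Run starts every registered function, and as soon as any one of them returns, the shared stop channel is closed and Run waits for the remaining functions to exit. A minimal, self-contained sketch with illustrative workers:

package main

import (
	"fmt"
	"time"

	"github.com/keel-hq/keel/internal/workgroup"
)

func main() {
	var g workgroup.Group

	// worker runs until the shared stop channel is closed
	g.Add(func(stop <-chan struct{}) {
		for {
			select {
			case <-stop:
				fmt.Println("worker: stopping")
				return
			case <-time.After(200 * time.Millisecond):
				fmt.Println("worker: tick")
			}
		}
	})

	// the first function to return triggers shutdown of the whole group
	g.Add(func(stop <-chan struct{}) {
		time.Sleep(time.Second)
		fmt.Println("timer: done, asking everyone to stop")
	})

	g.Run() // returns only after both functions have exited
}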


@@ -260,7 +260,6 @@ func (p *Provider) createUpdatePlans(event *types.Event) ([]*UpdatePlan, error)
 newVersion, err := version.GetVersion(event.Repository.Tag)
 if err != nil {
 plan, update, errCheck := checkUnversionedRelease(&event.Repository, release.Namespace, release.Name, release.Chart, release.Config)
 if errCheck != nil {
 log.WithFields(log.Fields{


@@ -52,7 +52,6 @@ func checkUnversionedRelease(repo *types.Repository, namespace, name string, cha
 // checking for impacted images
 for _, imageDetails := range keelCfg.Images {
 imageRef, err := parseImage(vals, &imageDetails)
 if err != nil {
 log.WithFields(log.Fields{


@@ -22,9 +22,9 @@ func (p *Provider) checkForApprovals(event *types.Event, plans []*UpdatePlan) (a
 approved, err := p.isApproved(event, plan)
 if err != nil {
 log.WithFields(log.Fields{
 "error": err,
-"deployment": plan.Deployment.Name,
-"namespace": plan.Deployment.Namespace,
+"name": plan.Resource.Name,
+"namespace": plan.Resource.Namespace,
 }).Error("provider.kubernetes: failed to check approval status for deployment")
 continue
 }
@@ -36,7 +36,7 @@
 }
 func (p *Provider) isApproved(event *types.Event, plan *UpdatePlan) (bool, error) {
-labels := plan.Deployment.GetLabels()
+labels := plan.Resource.GetLabels()
 minApprovalsStr, ok := labels[types.KeelMinimumApprovalsLabel]
 if !ok {
@@ -64,7 +64,7 @@ func (p *Provider) isApproved(event *types.Event, plan *UpdatePlan) (bool, error
 }
 }
-identifier := getIdentifier(plan.Deployment.Namespace, plan.Deployment.Name, plan.NewVersion)
+identifier := getIdentifier(plan.Resource.Namespace, plan.Resource.Name, plan.NewVersion)
 // checking for existing approval
 existing, err := p.approvalManager.Get(identifier)
@@ -84,13 +84,13 @@ func (p *Provider) isApproved(event *types.Event, plan *UpdatePlan) (bool, error
 Deadline: time.Now().Add(time.Duration(deadline) * time.Hour),
 }
-approval.Message = fmt.Sprintf("New image is available for deployment %s/%s (%s).",
-plan.Deployment.Namespace,
-plan.Deployment.Name,
+approval.Message = fmt.Sprintf("New image is available for resource %s/%s (%s).",
+plan.Resource.Namespace,
+plan.Resource.Name,
 approval.Delta(),
 )
-fmt.Println("requesting approval, ns: ", plan.Deployment.Namespace)
+fmt.Println("requesting approval, ns: ", plan.Resource.Namespace)
 return false, p.approvalManager.Create(approval)
 }


@@ -4,10 +4,11 @@ import (
 "testing"
 "time"
+"github.com/keel-hq/keel/internal/k8s"
 "github.com/keel-hq/keel/types"
+apps_v1 "k8s.io/api/apps/v1"
 "k8s.io/api/core/v1"
-"k8s.io/api/extensions/v1beta1"
 meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@ -23,33 +24,36 @@ func TestCheckRequestedApproval(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deployments := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all", types.KeelMinimumApprovalsLabel: "1"},
Labels: map[string]string{types.KeelPolicyLabel: "all", types.KeelMinimumApprovalsLabel: "1"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:1.1.1",
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
grs := MustParseGRS(deployments)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
approver := approver() approver := approver()
provider, err := NewProvider(fp, &fakeSender{}, approver) provider, err := NewProvider(fp, &fakeSender{}, approver, grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -91,33 +95,35 @@ func TestApprovedCheck(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deployments := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all", types.KeelMinimumApprovalsLabel: "1"},
Labels: map[string]string{types.KeelPolicyLabel: "all", types.KeelMinimumApprovalsLabel: "1"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:1.1.1",
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
grs := MustParseGRS(deployments)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
approver := approver() approver := approver()
provider, err := NewProvider(fp, &fakeSender{}, approver) provider, err := NewProvider(fp, &fakeSender{}, approver, grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }


@ -1,36 +0,0 @@
package kubernetes
import (
"strings"
)
func addImageToPull(annotations map[string]string, image string) map[string]string {
existing, ok := annotations[forceUpdateImageAnnotation]
if ok {
// check if it's already there
if shouldPullImage(annotations, image) {
// skipping
return annotations
}
annotations[forceUpdateImageAnnotation] = existing + "," + image
return annotations
}
annotations[forceUpdateImageAnnotation] = image
return annotations
}
func shouldPullImage(annotations map[string]string, image string) bool {
imagesStr, ok := annotations[forceUpdateImageAnnotation]
if !ok {
return false
}
images := strings.Split(imagesStr, ",")
for _, img := range images {
if img == image {
return true
}
}
return false
}


@ -1,76 +0,0 @@
package kubernetes
import (
"reflect"
"testing"
)
func Test_addImageToPull(t *testing.T) {
type args struct {
annotations map[string]string
image string
}
tests := []struct {
name string
args args
want map[string]string
}{
{
name: "empty",
args: args{annotations: make(map[string]string), image: "whatever"},
want: map[string]string{forceUpdateImageAnnotation: "whatever"},
},
{
name: "not empty",
args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo"}, image: "bar"},
want: map[string]string{forceUpdateImageAnnotation: "foo,bar"},
},
{
name: "not empty with same image",
args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo"}, image: "foo"},
want: map[string]string{forceUpdateImageAnnotation: "foo"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := addImageToPull(tt.args.annotations, tt.args.image); !reflect.DeepEqual(got, tt.want) {
t.Errorf("addImageToPull() = %v, want %v", got, tt.want)
}
})
}
}
func Test_shouldPullImage(t *testing.T) {
type args struct {
annotations map[string]string
image string
}
tests := []struct {
name string
args args
want bool
}{
{
name: "should pull single image",
args: args{annotations: map[string]string{forceUpdateImageAnnotation: "bar"}, image: "bar"},
want: true,
},
{
name: "should pull multiple image",
args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo,bar,whatever"}, image: "bar"},
want: true,
},
{
name: "should not pull multiple image",
args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo,bar,whatever"}, image: "alpha"},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := shouldPullImage(tt.args.annotations, tt.args.image); got != tt.want {
t.Errorf("shouldPullImage() = %v, want %v", got, tt.want)
}
})
}
}


@ -1,131 +1,134 @@
package kubernetes package kubernetes
import ( // import (
"testing" // "testing"
"time" // "time"
"github.com/keel-hq/keel/types" // "github.com/keel-hq/keel/internal/k8s"
"k8s.io/api/core/v1" // "github.com/keel-hq/keel/types"
"k8s.io/api/extensions/v1beta1" // "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" // "k8s.io/api/extensions/v1beta1"
) // meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// )
func TestForceUpdate(t *testing.T) { // func TestForceUpdate(t *testing.T) {
fp := &fakeImplementer{} // fp := &fakeImplementer{}
dep := &v1beta1.Deployment{ // dep := &v1beta1.Deployment{
meta_v1.TypeMeta{}, // meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ // meta_v1.ObjectMeta{
Name: "deployment-1", // Name: "deployment-1",
Namespace: "xx", // Namespace: "xx",
Labels: map[string]string{types.KeelPolicyLabel: "all"}, // Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, // },
v1beta1.DeploymentSpec{}, // v1beta1.DeploymentSpec{},
v1beta1.DeploymentStatus{}, // v1beta1.DeploymentStatus{},
} // }
fp.podList = &v1.PodList{ // grc := &k8s.GenericResourceCache{}
Items: []v1.Pod{ // fp.podList = &v1.PodList{
v1.Pod{ // Items: []v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{ // v1.Pod{
Name: "1", // ObjectMeta: meta_v1.ObjectMeta{
Namespace: "xx", // Name: "1",
}, // Namespace: "xx",
}, // },
v1.Pod{ // },
ObjectMeta: meta_v1.ObjectMeta{ // v1.Pod{
Name: "2", // ObjectMeta: meta_v1.ObjectMeta{
Namespace: "xx", // Name: "2",
}, // Namespace: "xx",
}, // },
}, // },
} // },
// }
provider, err := NewProvider(fp, &fakeSender{}, approver()) // provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { // if err != nil {
t.Fatalf("failed to get provider: %s", err) // t.Fatalf("failed to get provider: %s", err)
} // }
err = provider.forceUpdate(dep) // err = provider.forceUpdate(dep)
if err != nil { // if err != nil {
t.Fatalf("failed to force update: %s", err) // t.Fatalf("failed to force update: %s", err)
} // }
if len(fp.deletedPods) != 2 { // if len(fp.deletedPods) != 2 {
t.Errorf("expected to get 2 deleted pods") // t.Errorf("expected to get 2 deleted pods")
} // }
if fp.deletedPods[0].Namespace != "xx" { // if fp.deletedPods[0].Namespace != "xx" {
t.Errorf("wrong namespace: %s", fp.deletedPods[0].Namespace) // t.Errorf("wrong namespace: %s", fp.deletedPods[0].Namespace)
} // }
if fp.deletedPods[1].Namespace != "xx" { // if fp.deletedPods[1].Namespace != "xx" {
t.Errorf("wrong namespace: %s", fp.deletedPods[1].Namespace) // t.Errorf("wrong namespace: %s", fp.deletedPods[1].Namespace)
} // }
if fp.deletedPods[0].Name != "1" { // if fp.deletedPods[0].Name != "1" {
t.Errorf("wrong name: %s", fp.deletedPods[0].Name) // t.Errorf("wrong name: %s", fp.deletedPods[0].Name)
} // }
if fp.deletedPods[1].Name != "2" { // if fp.deletedPods[1].Name != "2" {
t.Errorf("wrong name: %s", fp.deletedPods[1].Name) // t.Errorf("wrong name: %s", fp.deletedPods[1].Name)
} // }
} // }
func TestForceUpdateDelay(t *testing.T) { // func TestForceUpdateDelay(t *testing.T) {
fp := &fakeImplementer{} // fp := &fakeImplementer{}
dep := &v1beta1.Deployment{ // dep := &v1beta1.Deployment{
meta_v1.TypeMeta{}, // meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ // meta_v1.ObjectMeta{
Name: "deployment-1", // Name: "deployment-1",
Namespace: "xx", // Namespace: "xx",
Labels: map[string]string{types.KeelPolicyLabel: "all"}, // Labels: map[string]string{types.KeelPolicyLabel: "all"},
Annotations: map[string]string{types.KeelPodDeleteDelay: "300"}, // Annotations: map[string]string{types.KeelPodDeleteDelay: "300"},
}, // },
v1beta1.DeploymentSpec{}, // v1beta1.DeploymentSpec{},
v1beta1.DeploymentStatus{}, // v1beta1.DeploymentStatus{},
} // }
fp.podList = &v1.PodList{ // fp.podList = &v1.PodList{
Items: []v1.Pod{ // Items: []v1.Pod{
v1.Pod{ // v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{ // ObjectMeta: meta_v1.ObjectMeta{
Name: "1", // Name: "1",
Namespace: "xx", // Namespace: "xx",
}, // },
}, // },
v1.Pod{ // v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{ // ObjectMeta: meta_v1.ObjectMeta{
Name: "2", // Name: "2",
Namespace: "xx", // Namespace: "xx",
}, // },
}, // },
}, // },
} // }
provider, err := NewProvider(fp, &fakeSender{}, approver()) // grc := &k8s.GenericResourceCache{}
if err != nil { // provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
t.Fatalf("failed to get provider: %s", err) // if err != nil {
} // t.Fatalf("failed to get provider: %s", err)
// }
go func() { // go func() {
err = provider.forceUpdate(dep) // err = provider.forceUpdate(dep)
if err != nil { // if err != nil {
t.Fatalf("failed to force update: %s", err) // t.Fatalf("failed to force update: %s", err)
} // }
}() // }()
time.Sleep(100 * time.Millisecond) // time.Sleep(100 * time.Millisecond)
if len(fp.deletedPods) != 1 { // if len(fp.deletedPods) != 1 {
t.Errorf("expected to get 1 deleted pods, another one should be delayed") // t.Errorf("expected to get 1 deleted pods, another one should be delayed")
} // }
if fp.deletedPods[0].Namespace != "xx" { // if fp.deletedPods[0].Namespace != "xx" {
t.Errorf("wrong namespace: %s", fp.deletedPods[0].Namespace) // t.Errorf("wrong namespace: %s", fp.deletedPods[0].Namespace)
} // }
if fp.deletedPods[0].Name != "1" { // if fp.deletedPods[0].Name != "1" {
t.Errorf("wrong name: %s", fp.deletedPods[0].Name) // t.Errorf("wrong name: %s", fp.deletedPods[0].Name)
} // }
} // }


@@ -3,14 +3,17 @@ package kubernetes
 import (
 "fmt"
+"github.com/keel-hq/keel/internal/k8s"
+apps_v1 "k8s.io/api/apps/v1"
 meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/client-go/kubernetes"
 core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
+// core_v1 "k8s.io/api/core/v1"
 // "k8s.io/api/core/v1"
 "k8s.io/api/core/v1"
-"k8s.io/api/extensions/v1beta1"
 "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/clientcmd"
@@ -21,9 +24,10 @@ import (
 type Implementer interface {
 Namespaces() (*v1.NamespaceList, error)
-Deployment(namespace, name string) (*v1beta1.Deployment, error)
-Deployments(namespace string) (*v1beta1.DeploymentList, error)
-Update(deployment *v1beta1.Deployment) error
+// Deployment(namespace, name string) (*v1beta1.Deployment, error)
+Deployments(namespace string) (*apps_v1.DeploymentList, error)
+// Update(deployment *v1beta1.Deployment) error
+Update(obj *k8s.GenericResource) error
 Secret(namespace, name string) (*v1.Secret, error)
@@ -87,6 +91,10 @@ func NewKubernetesImplementer(opts *Opts) (*KubernetesImplementer, error) {
 return &KubernetesImplementer{client: client, cfg: cfg}, nil
 }
+func (i *KubernetesImplementer) Client() *kubernetes.Clientset {
+return i.client
+}
 // Namespaces - get all namespaces
 func (i *KubernetesImplementer) Namespaces() (*v1.NamespaceList, error) {
 namespaces := i.client.Core().Namespaces()
@@ -94,20 +102,20 @@ func (i *KubernetesImplementer) Namespaces() (*v1.NamespaceList, error) {
 }
 // Deployment - get specific deployment for namespace/name
-func (i *KubernetesImplementer) Deployment(namespace, name string) (*v1beta1.Deployment, error) {
-dep := i.client.Extensions().Deployments(namespace)
+func (i *KubernetesImplementer) Deployment(namespace, name string) (*apps_v1.Deployment, error) {
+dep := i.client.Apps().Deployments(namespace)
 return dep.Get(name, meta_v1.GetOptions{})
 }
 // Deployments - get all deployments for namespace
-func (i *KubernetesImplementer) Deployments(namespace string) (*v1beta1.DeploymentList, error) {
-dep := i.client.Extensions().Deployments(namespace)
+func (i *KubernetesImplementer) Deployments(namespace string) (*apps_v1.DeploymentList, error) {
+dep := i.client.Apps().Deployments(namespace)
 l, err := dep.List(meta_v1.ListOptions{})
 return l, err
 }
-// Update - update deployment
-func (i *KubernetesImplementer) Update(deployment *v1beta1.Deployment) error {
+// Update converts generic resource into specific kubernetes type and updates it
+func (i *KubernetesImplementer) Update(obj *k8s.GenericResource) error {
 // retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 // // Retrieve the latest version of Deployment before attempting update
 // // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
@@ -116,8 +124,26 @@ func (i *KubernetesImplementer) Update(deployment *v1beta1.Deployment) error {
 // })
 // return retryErr
-_, err := i.client.Extensions().Deployments(deployment.Namespace).Update(deployment)
-return err
+switch resource := obj.GetResource().(type) {
+case *apps_v1.Deployment:
+_, err := i.client.Apps().Deployments(resource.Namespace).Update(resource)
+if err != nil {
+return err
+}
+case *apps_v1.StatefulSet:
+_, err := i.client.Apps().StatefulSets(resource.Namespace).Update(resource)
+if err != nil {
+return err
+}
+case *apps_v1.DaemonSet:
+_, err := i.client.Apps().DaemonSets(resource.Namespace).Update(resource)
+if err != nil {
+return err
+}
+default:
+return fmt.Errorf("unsupported object type")
+}
+return nil
 }
 // Secret - get secret


@@ -9,12 +9,12 @@ import (
 "github.com/rusenask/cron"
 "k8s.io/api/core/v1"
-"k8s.io/api/extensions/v1beta1"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/keel-hq/keel/approvals"
 "github.com/keel-hq/keel/extension/notification"
+"github.com/keel-hq/keel/internal/k8s"
 "github.com/keel-hq/keel/types"
 "github.com/keel-hq/keel/util/image"
 "github.com/keel-hq/keel/util/policies"
@@ -49,13 +49,22 @@ const ProviderName = "kubernetes"
 var versionreg = regexp.MustCompile(`:[^:]*$`)
-// annotation used to specify which image to force pull
-const forceUpdateImageAnnotation = "keel.sh/update-image"
+// GenericResourceCache an interface for generic resource cache.
+type GenericResourceCache interface {
+// Values returns a copy of the contents of the cache.
+// The slice and its contents should be treated as read-only.
+Values() []*k8s.GenericResource
+// Register registers ch to receive a value when Notify is called.
+Register(chan int, int)
+}
 // UpdatePlan - deployment update plan
 type UpdatePlan struct {
 // Updated deployment version
-Deployment v1beta1.Deployment
+// Deployment v1beta1.Deployment
+Resource *k8s.GenericResource
 // Current (last seen cluster version)
 CurrentVersion string
 // New version that's already in the deployment
@@ -70,14 +79,17 @@ type Provider struct {
 approvalManager approvals.Manager
+cache GenericResourceCache
 events chan *types.Event
 stop chan struct{}
 }
 // NewProvider - create new kubernetes based provider
-func NewProvider(implementer Implementer, sender notification.Sender, approvalManager approvals.Manager) (*Provider, error) {
+func NewProvider(implementer Implementer, sender notification.Sender, approvalManager approvals.Manager, cache GenericResourceCache) (*Provider, error) {
 return &Provider{
 implementer: implementer,
+cache: cache,
 approvalManager: approvalManager,
 events: make(chan *types.Event, 100),
 stop: make(chan struct{}),
@@ -106,68 +118,64 @@ func (p *Provider) Stop() {
 close(p.stop)
 }
-// TrackedImages - get tracked images
+// TrackedImages returns a list of tracked images.
 func (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {
 var trackedImages []*types.TrackedImage
-deploymentLists, err := p.deployments()
-if err != nil {
-return nil, err
-}
-for _, deploymentList := range deploymentLists {
-for _, deployment := range deploymentList.Items {
-labels := deployment.GetLabels()
+for _, gr := range p.cache.Values() {
+labels := gr.GetLabels()
 // ignoring unlabelled deployments
 policy := policies.GetPolicy(labels)
 if policy == types.PolicyTypeNone {
 continue
 }
-annotations := deployment.GetAnnotations()
+annotations := gr.GetAnnotations()
 schedule, ok := annotations[types.KeelPollScheduleAnnotation]
 if ok {
 _, err := cron.Parse(schedule)
 if err != nil {
 log.WithFields(log.Fields{
 "error": err,
 "schedule": schedule,
-"deployment": deployment.Name,
-"namespace": deployment.Namespace,
+"name": gr.Name,
+"namespace": gr.Namespace,
 }).Error("provider.kubernetes: failed to parse poll schedule, setting default schedule")
 schedule = types.KeelPollDefaultSchedule
 }
 } else {
 schedule = types.KeelPollDefaultSchedule
 }
 // trigger type, we only care for "poll" type triggers
 trigger := policies.GetTriggerPolicy(labels)
-secrets := getImagePullSecrets(&deployment)
-images := getImages(&deployment)
+// secrets := getImagePullSecrets(&deployment)
+secrets := gr.GetImagePullSecrets()
+// images := getImages(&deployment)
+images := gr.GetImages()
 for _, img := range images {
 ref, err := image.Parse(img)
 if err != nil {
 log.WithFields(log.Fields{
 "error": err,
 "image": img,
-"namespace": deployment.Namespace,
-"name": deployment.Name,
+"namespace": gr.Namespace,
+"name": gr.Name,
 }).Error("provider.kubernetes: failed to parse image")
 continue
 }
 trackedImages = append(trackedImages, &types.TrackedImage{
 Image: ref,
 PollSchedule: schedule,
 Trigger: trigger,
 Provider: ProviderName,
-Namespace: deployment.Namespace,
+Namespace: gr.Namespace,
 Secrets: secrets,
 })
 }
 }
@ -198,7 +206,8 @@ func (p *Provider) startInternal() error {
} }
} }
func (p *Provider) processEvent(event *types.Event) (updated []*v1beta1.Deployment, err error) { // func (p *Provider) processEvent(event *types.Event) (updated []*v1beta1.Deployment, err error) {
func (p *Provider) processEvent(event *types.Event) (updated []*k8s.GenericResource, err error) {
plans, err := p.createUpdatePlans(&event.Repository) plans, err := p.createUpdatePlans(&event.Repository)
if err != nil { if err != nil {
return nil, err return nil, err
@ -218,16 +227,18 @@ func (p *Provider) processEvent(event *types.Event) (updated []*v1beta1.Deployme
} }
// func (p *Provider) updateDeployments(deployments []v1beta1.Deployment) (updated []*v1beta1.Deployment, err error) { // func (p *Provider) updateDeployments(deployments []v1beta1.Deployment) (updated []*v1beta1.Deployment, err error) {
func (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*v1beta1.Deployment, err error) { func (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*k8s.GenericResource, err error) {
// for _, deployment := range plans {
for _, plan := range plans { for _, plan := range plans {
deployment := plan.Deployment resource := plan.Resource
notificationChannels := types.ParseEventNotificationChannels(deployment.Annotations)
reset := checkForReset(deployment) annotations := resource.GetAnnotations()
notificationChannels := types.ParseEventNotificationChannels(annotations)
// reset := checkForReset(deployment)
p.sender.Send(types.EventNotification{ p.sender.Send(types.EventNotification{
Name: "preparing to update deployment", Name: "preparing to update resource",
Message: fmt.Sprintf("Preparing to update deployment %s/%s %s->%s (%s)", deployment.Namespace, deployment.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(getImages(&deployment), ", ")), Message: fmt.Sprintf("Preparing to update %s %s/%s %s->%s (%s)", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), ", ")),
CreatedAt: time.Now(), CreatedAt: time.Now(),
Type: types.NotificationPreDeploymentUpdate, Type: types.NotificationPreDeploymentUpdate,
Level: types.LevelDebug, Level: types.LevelDebug,
@ -236,27 +247,33 @@ func (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*v1beta1.De
var err error var err error
if reset { // if reset {
// force update, terminating all pods // annotations["keel.sh/update-time"] = time.Now().String()
err = p.forceUpdate(&deployment) // }
kubernetesUnversionedUpdatesCounter.With(prometheus.Labels{"deployment": fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)}).Inc() // force update, terminating all pods
} else { // err = p.forceUpdate(&deployment)
// regular update // kubernetesUnversionedUpdatesCounter.With(prometheus.Labels{"deployment": fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)}).Inc()
deployment.Annotations["kubernetes.io/change-cause"] = fmt.Sprintf("keel automated update, version %s -> %s", plan.CurrentVersion, plan.NewVersion) // } else {
err = p.implementer.Update(&deployment) // regular update
kubernetesVersionedUpdatesCounter.With(prometheus.Labels{"deployment": fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)}).Inc() // deployment.Annotations["kubernetes.io/change-cause"] = fmt.Sprintf("keel automated update, version %s -> %s", plan.CurrentVersion, plan.NewVersion)
} annotations["kubernetes.io/change-cause"] = fmt.Sprintf("keel automated update, version %s -> %s", plan.CurrentVersion, plan.NewVersion)
resource.SetAnnotations(annotations)
err = p.implementer.Update(resource)
kubernetesVersionedUpdatesCounter.With(prometheus.Labels{resource.Kind(): fmt.Sprintf("%s/%s", resource.Namespace, resource.Name)}).Inc()
// }
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"error": err, "error": err,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"deployment": deployment.Name, "deployment": resource.Name,
"update": fmt.Sprintf("%s->%s", plan.CurrentVersion, plan.NewVersion), "update": fmt.Sprintf("%s->%s", plan.CurrentVersion, plan.NewVersion),
}).Error("provider.kubernetes: got error while update deployment") }).Error("provider.kubernetes: got error while update deployment")
p.sender.Send(types.EventNotification{ p.sender.Send(types.EventNotification{
Name: "update deployment", Name: "update resource",
Message: fmt.Sprintf("Deployment %s/%s update %s->%s failed, error: %s", deployment.Namespace, deployment.Name, plan.CurrentVersion, plan.NewVersion, err), Message: fmt.Sprintf("%s %s/%s update %s->%s failed, error: %s", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, err),
CreatedAt: time.Now(), CreatedAt: time.Now(),
Type: types.NotificationDeploymentUpdate, Type: types.NotificationDeploymentUpdate,
Level: types.LevelError, Level: types.LevelError,
@ -267,8 +284,8 @@ func (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*v1beta1.De
} }
p.sender.Send(types.EventNotification{ p.sender.Send(types.EventNotification{
Name: "update deployment", Name: "update resource",
Message: fmt.Sprintf("Successfully updated deployment %s/%s %s->%s (%s)", deployment.Namespace, deployment.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(getImages(&deployment), ", ")), Message: fmt.Sprintf("Successfully updated %s %s/%s %s->%s (%s)", resource.Kind(), resource.Namespace, resource.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(resource.GetImages(), ", ")),
CreatedAt: time.Now(), CreatedAt: time.Now(),
Type: types.NotificationDeploymentUpdate, Type: types.NotificationDeploymentUpdate,
Level: types.LevelSuccess, Level: types.LevelSuccess,
@ -276,31 +293,31 @@ func (p *Provider) updateDeployments(plans []*UpdatePlan) (updated []*v1beta1.De
}) })
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
}).Info("provider.kubernetes: deployment updated") }).Info("provider.kubernetes: deployment updated")
updated = append(updated, &deployment) updated = append(updated, resource)
} }
return return
} }
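
The accessor calls above (Kind, GetAnnotations, SetAnnotations, GetImages, Containers, UpdateContainer) all go through the new internal/k8s wrapper. As a rough sketch of the surface this code relies on, inferred only from the calls in this diff rather than from internal/k8s itself:

package kubernetes_sketch

import v1 "k8s.io/api/core/v1"

// genericResource is an illustrative interface only: it lists the methods the
// provider code above calls on *k8s.GenericResource. The real wrapper is a
// struct in internal/k8s; Name and Namespace are plain fields there, so they
// are left out of this interface.
type genericResource interface {
    Kind() string                            // e.g. "deployment" or "statefulset"
    GetLabels() map[string]string            // used for policy lookup
    GetAnnotations() map[string]string       // resource-level annotations
    SetAnnotations(map[string]string)
    GetSpecAnnotations() map[string]string   // pod template annotations
    SetSpecAnnotations(map[string]string)
    GetImages() []string                     // all container images
    Containers() []v1.Container              // pod template containers
    UpdateContainer(index int, image string) // swap one container's image
}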
func getImages(deployment *v1beta1.Deployment) []string { // func getImages(deployment *v1beta1.Deployment) []string {
var images []string // var images []string
for _, c := range deployment.Spec.Template.Spec.Containers { // for _, c := range deployment.Spec.Template.Spec.Containers {
images = append(images, c.Image) // images = append(images, c.Image)
} // }
return images // return images
} // }
func getImagePullSecrets(deployment *v1beta1.Deployment) []string { // func getImagePullSecrets(deployment *v1beta1.Deployment) []string {
var secrets []string // var secrets []string
for _, s := range deployment.Spec.Template.Spec.ImagePullSecrets { // for _, s := range deployment.Spec.Template.Spec.ImagePullSecrets {
secrets = append(secrets, s.Name) // secrets = append(secrets, s.Name)
} // }
return secrets // return secrets
} // }
func getDesiredImage(delta map[string]string, currentImage string) (string, error) { func getDesiredImage(delta map[string]string, currentImage string) (string, error) {
currentRef, err := image.Parse(currentImage) currentRef, err := image.Parse(currentImage)
@ -324,101 +341,76 @@ func getDesiredImage(delta map[string]string, currentImage string) (string, erro
return "", fmt.Errorf("image %s not found in deltas", currentImage) return "", fmt.Errorf("image %s not found in deltas", currentImage)
} }
// checkForReset returns delta to apply after setting image
func checkForReset(deployment v1beta1.Deployment) bool {
reset := false
annotations := deployment.GetAnnotations()
for _, c := range deployment.Spec.Template.Spec.Containers {
if shouldPullImage(annotations, c.Image) {
reset = true
}
}
return reset
}
// getDeployment - helper function to get specific deployment
func (p *Provider) getDeployment(namespace, name string) (*v1beta1.Deployment, error) {
return p.implementer.Deployment(namespace, name)
}
// createUpdatePlans - impacted deployments by changed repository // createUpdatePlans - impacted deployments by changed repository
// func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deployment, error) {
func (p *Provider) createUpdatePlans(repo *types.Repository) ([]*UpdatePlan, error) { func (p *Provider) createUpdatePlans(repo *types.Repository) ([]*UpdatePlan, error) {
deploymentLists, err := p.deployments()
if err != nil {
log.WithFields(log.Fields{
"error": err,
}).Error("provider.kubernetes: failed to get deployment lists")
return nil, err
}
// impacted := []v1beta1.Deployment{}
impacted := []*UpdatePlan{} impacted := []*UpdatePlan{}
for _, deploymentList := range deploymentLists { for _, resource := range p.cache.Values() {
for _, deployment := range deploymentList.Items {
labels := deployment.GetLabels() labels := resource.GetLabels()
policy := policies.GetPolicy(labels) policy := policies.GetPolicy(labels)
if policy == types.PolicyTypeNone { if policy == types.PolicyTypeNone {
// skip // skip
continue log.Infof("no policy defined, skipping: %s, labels: %s", resource.Identifier, labels)
} continue
}
// annotation cleanup // annotation cleanup
annotations := deployment.GetAnnotations() // annotations := gr.GetAnnotations()
delete(annotations, forceUpdateImageAnnotation) // delete(annotations, forceUpdateImageAnnotation)
deployment.SetAnnotations(annotations) // deployment.SetAnnotations(annotations)
newVersion, err := version.GetVersion(repo.Tag) newVersion, err := version.GetVersion(repo.Tag)
if err != nil { if err != nil {
// failed to get new version tag // failed to get new version tag
if policy == types.PolicyTypeForce { if policy == types.PolicyTypeForce {
updated, shouldUpdateDeployment, err := p.checkUnversionedDeployment(policy, repo, deployment) updated, shouldUpdateDeployment, err := p.checkUnversionedDeployment(policy, repo, resource)
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"error": err, "error": err,
"deployment": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
}).Error("provider.kubernetes: got error while checking unversioned deployment") "kind": resource.Kind(),
continue }).Error("provider.kubernetes: got error while checking unversioned resource")
}
if shouldUpdateDeployment {
impacted = append(impacted, updated)
}
// success, unversioned deployment marked for update
continue continue
} }
log.WithFields(log.Fields{ if shouldUpdateDeployment {
"error": err, impacted = append(impacted, updated)
"repository_tag": repo.Tag, }
"deployment": deployment.Name,
"namespace": deployment.Namespace, // success, unversioned deployment marked for update
"policy": policy,
}).Warn("provider.kubernetes: got error while parsing repository tag")
continue continue
} }
updated, shouldUpdateDeployment, err := p.checkVersionedDeployment(newVersion, policy, repo, deployment) log.WithFields(log.Fields{
if err != nil { "error": err,
log.WithFields(log.Fields{ "repository_tag": repo.Tag,
"error": err, "deployment": resource.Name,
"deployment": deployment.Name, "namespace": resource.Namespace,
"namespace": deployment.Namespace, "kind": resource.Kind(),
}).Error("provider.kubernetes: got error while checking versioned deployment") "policy": policy,
continue }).Warn("provider.kubernetes: got error while parsing repository tag")
} continue
}
if shouldUpdateDeployment { updated, shouldUpdateDeployment, err := p.checkVersionedDeployment(newVersion, policy, repo, resource)
impacted = append(impacted, updated) if err != nil {
} log.WithFields(log.Fields{
"error": err,
"deployment": resource.Name,
"kind": resource.Kind(),
"namespace": resource.Namespace,
}).Error("provider.kubernetes: got error while checking versioned resource")
continue
}
if shouldUpdateDeployment {
impacted = append(impacted, updated)
} }
} }
// }
return impacted, nil return impacted, nil
} }
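
createUpdatePlans hands its results to updateDeployments via UpdatePlan values. Based solely on the fields referenced in this diff (plan.Resource, plan.CurrentVersion, plan.NewVersion), the struct is roughly the following sketch; the authoritative definition lives elsewhere in the provider package:

package kubernetes_sketch

import "github.com/keel-hq/keel/internal/k8s"

// UpdatePlan (inferred sketch): one planned update for a single resource.
type UpdatePlan struct {
    Resource       *k8s.GenericResource // the Deployment/StatefulSet to update
    CurrentVersion string               // tag currently running
    NewVersion     string               // tag carried by the incoming event
}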
@ -426,27 +418,3 @@ func (p *Provider) createUpdatePlans(repo *types.Repository) ([]*UpdatePlan, err
func (p *Provider) namespaces() (*v1.NamespaceList, error) { func (p *Provider) namespaces() (*v1.NamespaceList, error) {
return p.implementer.Namespaces() return p.implementer.Namespaces()
} }
// deployments - gets all deployments
func (p *Provider) deployments() ([]*v1beta1.DeploymentList, error) {
deployments := []*v1beta1.DeploymentList{}
n, err := p.namespaces()
if err != nil {
return nil, err
}
for _, n := range n.Items {
l, err := p.implementer.Deployments(n.GetName())
if err != nil {
log.WithFields(log.Fields{
"error": err,
"namespace": n.GetName(),
}).Error("provider.kubernetes: failed to list deployments")
continue
}
deployments = append(deployments, l)
}
return deployments, nil
}
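
With the per-namespace deployments() listing removed, resources now come from the shared GenericResourceCache that is passed into NewProvider. A minimal usage sketch, assuming only the NewGenericResource/Add/Values calls that appear in this diff (how the cache is kept in sync with the cluster is handled elsewhere and is not shown here):

package kubernetes_sketch

import (
    apps_v1 "k8s.io/api/apps/v1"

    "github.com/keel-hq/keel/internal/k8s"
)

// cacheUsageSketch shows the cache-based flow that replaces the removed
// namespace-by-namespace Deployments() listing above.
func cacheUsageSketch(dep *apps_v1.Deployment) error {
    gr, err := k8s.NewGenericResource(dep) // wrap a typed object, as MustParseGR does in the tests
    if err != nil {
        return err
    }

    cache := &k8s.GenericResourceCache{}
    cache.Add(gr) // the tests call cache.Add(grs...) with many resources

    // createUpdatePlans ranges over p.cache.Values() instead of listing deployments.
    for _, resource := range cache.Values() {
        _ = resource.GetLabels()
    }
    return nil
}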


@ -7,11 +7,12 @@ import (
"github.com/keel-hq/keel/approvals" "github.com/keel-hq/keel/approvals"
"github.com/keel-hq/keel/cache/memory" "github.com/keel-hq/keel/cache/memory"
"github.com/keel-hq/keel/extension/notification" "github.com/keel-hq/keel/extension/notification"
"github.com/keel-hq/keel/internal/k8s"
"github.com/keel-hq/keel/types" "github.com/keel-hq/keel/types"
"github.com/keel-hq/keel/util/codecs" "github.com/keel-hq/keel/util/codecs"
apps_v1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
core_v1 "k8s.io/client-go/kubernetes/typed/core/v1" core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
) )
@ -41,14 +42,14 @@ func (p *fakeProvider) GetName() string {
type fakeImplementer struct { type fakeImplementer struct {
namespaces *v1.NamespaceList namespaces *v1.NamespaceList
deployment *v1beta1.Deployment deployment *apps_v1.Deployment
deploymentList *v1beta1.DeploymentList deploymentList *apps_v1.DeploymentList
podList *v1.PodList podList *v1.PodList
deletedPods []*v1.Pod deletedPods []*v1.Pod
// stores value of an updated deployment // stores value of an updated deployment
updated *v1beta1.Deployment updated *k8s.GenericResource
availableSecret *v1.Secret availableSecret *v1.Secret
} }
@ -57,16 +58,16 @@ func (i *fakeImplementer) Namespaces() (*v1.NamespaceList, error) {
return i.namespaces, nil return i.namespaces, nil
} }
func (i *fakeImplementer) Deployment(namespace, name string) (*v1beta1.Deployment, error) { func (i *fakeImplementer) Deployment(namespace, name string) (*apps_v1.Deployment, error) {
return i.deployment, nil return i.deployment, nil
} }
func (i *fakeImplementer) Deployments(namespace string) (*v1beta1.DeploymentList, error) { func (i *fakeImplementer) Deployments(namespace string) (*apps_v1.DeploymentList, error) {
return i.deploymentList, nil return i.deploymentList, nil
} }
func (i *fakeImplementer) Update(deployment *v1beta1.Deployment) error { func (i *fakeImplementer) Update(obj *k8s.GenericResource) error {
i.updated = deployment i.updated = obj
return nil return nil
} }
@ -128,7 +129,9 @@ func TestGetNamespaces(t *testing.T) {
}, },
} }
provider, err := NewProvider(fi, &fakeSender{}, approver()) grc := &k8s.GenericResourceCache{}
provider, err := NewProvider(fi, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -150,59 +153,81 @@ func TestGetImageName(t *testing.T) {
} }
} }
func TestGetDeployments(t *testing.T) { // func TestGetDeployments(t *testing.T) {
fp := &fakeImplementer{} // fp := &fakeImplementer{}
fp.namespaces = &v1.NamespaceList{ // fp.namespaces = &v1.NamespaceList{
Items: []v1.Namespace{ // Items: []v1.Namespace{
v1.Namespace{ // v1.Namespace{
meta_v1.TypeMeta{}, // meta_v1.TypeMeta{},
meta_v1.ObjectMeta{Name: "xxxx"}, // meta_v1.ObjectMeta{Name: "xxxx"},
v1.NamespaceSpec{}, // v1.NamespaceSpec{},
v1.NamespaceStatus{}, // v1.NamespaceStatus{},
}, // },
}, // },
} // }
fp.deploymentList = &v1beta1.DeploymentList{ // fp.deploymentList = &apps_v1.DeploymentList{
Items: []v1beta1.Deployment{ // Items: []apps_v1.Deployment{
v1beta1.Deployment{ // apps_v1.Deployment{
meta_v1.TypeMeta{}, // meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ // meta_v1.ObjectMeta{
Name: "dep-1", // Name: "dep-1",
Namespace: "xxxx", // Namespace: "xxxx",
Labels: map[string]string{types.KeelPolicyLabel: "all"}, // Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, // },
v1beta1.DeploymentSpec{ // apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ // Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{ // Spec: v1.PodSpec{
Containers: []v1.Container{ // Containers: []v1.Container{
v1.Container{ // v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1", // Image: "gcr.io/v2-namespace/hello-world:1.1",
}, // },
}, // },
}, // },
}, // },
}, // },
v1beta1.DeploymentStatus{}, // apps_v1.DeploymentStatus{},
}, // },
}, // },
} // }
// grc := &k8s.GenericResourceCache{}
// provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
// if err != nil {
// t.Fatalf("failed to get provider: %s", err)
// }
provider, err := NewProvider(fp, &fakeSender{}, approver()) // deps, err := provider.deployments()
// if err != nil {
// t.Errorf("failed to get deployments: %s", err)
// }
// if len(deps) != 1 {
// t.Errorf("expected to find 1 deployment, got: %d", len(deps))
// }
// if deps[0].Items[0].GetName() != "dep-1" {
// t.Errorf("expected name %s, got %s", "dep-1", deps[0].Items[0].GetName())
// }
// }
func MustParseGR(obj interface{}) *k8s.GenericResource {
gr, err := k8s.NewGenericResource(obj)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) panic(err)
} }
return gr
}
deps, err := provider.deployments() func MustParseGRS(objs []*apps_v1.Deployment) []*k8s.GenericResource {
if err != nil { grs := make([]*k8s.GenericResource, len(objs))
t.Errorf("failed to get deployments: %s", err) for idx, obj := range objs {
} var err error
if len(deps) != 1 { var gr *k8s.GenericResource
t.Errorf("expected to find 1 deployment, got: %d", len(deps)) gr, err = k8s.NewGenericResource(obj)
} if err != nil {
panic(err)
if deps[0].Items[0].GetName() != "dep-1" { }
t.Errorf("expected name %s, got %s", "dep-1", deps[0].Items[0].GetName()) grs[idx] = gr
} }
return grs
} }
func TestGetImpacted(t *testing.T) { func TestGetImpacted(t *testing.T) {
@ -217,52 +242,55 @@ func TestGetImpacted(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{
Items: []v1beta1.Deployment{ deps := []*apps_v1.Deployment{
v1beta1.Deployment{ {
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
Namespace: "xxxx", Namespace: "xxxx",
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1", Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
v1beta1.Deployment{ apps_v1.DeploymentStatus{},
meta_v1.TypeMeta{}, },
meta_v1.ObjectMeta{ {
Name: "dep-2", meta_v1.TypeMeta{},
Namespace: "xxxx", meta_v1.ObjectMeta{
Labels: map[string]string{"whatever": "all"}, Name: "dep-2",
}, Namespace: "xxxx",
v1beta1.DeploymentSpec{ Labels: map[string]string{"whatever": "all"},
Template: v1.PodTemplateSpec{ },
Spec: v1.PodSpec{ apps_v1.DeploymentSpec{
Containers: []v1.Container{ Template: v1.PodTemplateSpec{
v1.Container{ Spec: v1.PodSpec{
Image: "gcr.io/v2-namespace/hello-world:1.1.1", Containers: []v1.Container{
}, v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
provider, err := NewProvider(fp, &fakeSender{}, approver()) grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -279,11 +307,11 @@ func TestGetImpacted(t *testing.T) {
} }
if len(plans) != 1 { if len(plans) != 1 {
t.Errorf("expected to find 1 deployment update plan but found %d", len(plans)) t.Fatalf("expected to find 1 deployment update plan but found %d", len(plans))
} }
found := false found := false
for _, c := range plans[0].Deployment.Spec.Template.Spec.Containers { for _, c := range plans[0].Resource.Containers() {
containerImageName := versionreg.ReplaceAllString(c.Image, "") containerImageName := versionreg.ReplaceAllString(c.Image, "")
@ -310,54 +338,97 @@ func TestProcessEvent(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "deployment-1",
Name: "deployment-1", Namespace: "ns-1",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ ObjectMeta: meta_v1.ObjectMeta{
Spec: v1.PodSpec{ Annotations: map[string]string{
Containers: []v1.Container{ "this": "that",
v1.Container{ },
Image: "gcr.io/v2-namespace/hello-world:1.1.1", },
}, Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
v1beta1.Deployment{ apps_v1.DeploymentStatus{},
meta_v1.TypeMeta{}, },
meta_v1.ObjectMeta{ {
Name: "deployment-2", meta_v1.TypeMeta{},
Namespace: "xxxx", meta_v1.ObjectMeta{
Labels: map[string]string{"whatever": "all"}, Name: "deployment-2",
Annotations: map[string]string{}, Namespace: "ns-2",
}, Labels: map[string]string{"whatever": "all"},
v1beta1.DeploymentSpec{ Annotations: map[string]string{},
Template: v1.PodTemplateSpec{ },
Spec: v1.PodSpec{ apps_v1.DeploymentSpec{
Containers: []v1.Container{ Template: v1.PodTemplateSpec{
v1.Container{ ObjectMeta: meta_v1.ObjectMeta{
Image: "gcr.io/v2-namespace/bye-world:1.1.1", Annotations: map[string]string{
}, "this": "that",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
Image: "gcr.io/v2-namespace/bye-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
},
{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "deployment-3",
Namespace: "ns-3",
Labels: map[string]string{
"whatever": "all",
"foo": "bar",
},
Annotations: map[string]string{
"ann": "1",
},
},
apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
Image: "gcr.io/v2-namespace/bye-world:1.1.1",
},
},
},
},
},
apps_v1.DeploymentStatus{},
}, },
} }
provider, err := NewProvider(fp, &fakeSender{}, approver()) grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -373,8 +444,12 @@ func TestProcessEvent(t *testing.T) {
t.Errorf("got error while processing event: %s", err) t.Errorf("got error while processing event: %s", err)
} }
if fp.updated.Spec.Template.Spec.Containers[0].Image != repo.Name+":"+repo.Tag { if fp.updated == nil {
t.Errorf("expected to find a deployment with updated image but found: %s", fp.updated.Spec.Template.Spec.Containers[0].Image) t.Fatalf("resource was not updated")
}
if fp.updated.Containers()[0].Image != repo.Name+":"+repo.Tag {
t.Errorf("expected to find a deployment with updated image but found: %s", fp.updated.Containers()[0].Image)
} }
} }
@ -390,33 +465,35 @@ func TestProcessEventBuildNumber(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "deployment-1",
Name: "deployment-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:10",
Image: "gcr.io/v2-namespace/hello-world:10",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
provider, err := NewProvider(fp, &fakeSender{}, approver()) grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -432,8 +509,8 @@ func TestProcessEventBuildNumber(t *testing.T) {
t.Errorf("got error while processing event: %s", err) t.Errorf("got error while processing event: %s", err)
} }
if fp.updated.Spec.Template.Spec.Containers[0].Image != repo.Name+":"+repo.Tag { if fp.updated.Containers()[0].Image != repo.Name+":"+repo.Tag {
t.Errorf("expected to find a deployment with updated image but found: %s", fp.updated.Spec.Template.Spec.Containers[0].Image) t.Errorf("expected to find a deployment with updated image but found: %s", fp.updated.Containers()[0].Image)
} }
} }
@ -450,56 +527,57 @@ func TestGetImpactedTwoContainersInSameDeployment(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:1.1.1",
Image: "gcr.io/v2-namespace/hello-world:1.1.1", },
}, v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/greetings-world:1.1.1",
Image: "gcr.io/v2-namespace/greetings-world:1.1.1",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
v1beta1.Deployment{ apps_v1.DeploymentStatus{},
meta_v1.TypeMeta{}, },
meta_v1.ObjectMeta{ {
Name: "dep-2", meta_v1.TypeMeta{},
Namespace: "xxxx", meta_v1.ObjectMeta{
Labels: map[string]string{"whatever": "all"}, Name: "dep-2",
}, Namespace: "xxxx",
v1beta1.DeploymentSpec{ Labels: map[string]string{"whatever": "all"},
Template: v1.PodTemplateSpec{ },
Spec: v1.PodSpec{ apps_v1.DeploymentSpec{
Containers: []v1.Container{ Template: v1.PodTemplateSpec{
v1.Container{ Spec: v1.PodSpec{
Image: "gcr.io/v2-namespace/hello-world:1.1.1", Containers: []v1.Container{
}, v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver()) provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -520,7 +598,7 @@ func TestGetImpactedTwoContainersInSameDeployment(t *testing.T) {
} }
found := false found := false
for _, c := range plans[0].Deployment.Spec.Template.Spec.Containers { for _, c := range plans[0].Resource.Containers() {
containerImageName := versionreg.ReplaceAllString(c.Image, "") containerImageName := versionreg.ReplaceAllString(c.Image, "")
@ -548,57 +626,59 @@ func TestGetImpactedTwoSameContainersInSameDeployment(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:1.1.1",
Image: "gcr.io/v2-namespace/hello-world:1.1.1", },
}, v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:1.1.1",
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
v1beta1.Deployment{ apps_v1.DeploymentStatus{},
meta_v1.TypeMeta{}, },
meta_v1.ObjectMeta{ {
Name: "dep-2", meta_v1.TypeMeta{},
Namespace: "xxxx", meta_v1.ObjectMeta{
Labels: map[string]string{"whatever": "all"}, Name: "dep-2",
Annotations: map[string]string{}, Namespace: "xxxx",
}, Labels: map[string]string{"whatever": "all"},
v1beta1.DeploymentSpec{ Annotations: map[string]string{},
Template: v1.PodTemplateSpec{ },
Spec: v1.PodSpec{ apps_v1.DeploymentSpec{
Containers: []v1.Container{ Template: v1.PodTemplateSpec{
v1.Container{ Spec: v1.PodSpec{
Image: "gcr.io/v2-namespace/hello-world:1.1.1", Containers: []v1.Container{
}, v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
provider, err := NewProvider(fp, &fakeSender{}, approver()) grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -619,7 +699,7 @@ func TestGetImpactedTwoSameContainersInSameDeployment(t *testing.T) {
} }
found := false found := false
for _, c := range plans[0].Deployment.Spec.Template.Spec.Containers { for _, c := range plans[0].Resource.Containers() {
containerImageName := versionreg.ReplaceAllString(c.Image, "") containerImageName := versionreg.ReplaceAllString(c.Image, "")
@ -646,54 +726,55 @@ func TestGetImpactedUntaggedImage(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/foo-world",
Image: "gcr.io/v2-namespace/foo-world",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
v1beta1.Deployment{ apps_v1.DeploymentStatus{},
meta_v1.TypeMeta{}, },
meta_v1.ObjectMeta{ {
Name: "dep-2", meta_v1.TypeMeta{},
Namespace: "xxxx", meta_v1.ObjectMeta{
Annotations: map[string]string{}, Name: "dep-2",
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Namespace: "xxxx",
}, Annotations: map[string]string{},
v1beta1.DeploymentSpec{ Labels: map[string]string{types.KeelPolicyLabel: "all"},
Template: v1.PodTemplateSpec{ },
Spec: v1.PodSpec{ apps_v1.DeploymentSpec{
Containers: []v1.Container{ Template: v1.PodTemplateSpec{
v1.Container{ Spec: v1.PodSpec{
Image: "gcr.io/v2-namespace/hello-world:1.1.1", Containers: []v1.Container{
}, v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver()) provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -714,7 +795,7 @@ func TestGetImpactedUntaggedImage(t *testing.T) {
} }
found := false found := false
for _, c := range plans[0].Deployment.Spec.Template.Spec.Containers { for _, c := range plans[0].Resource.Containers() {
containerImageName := versionreg.ReplaceAllString(c.Image, "") containerImageName := versionreg.ReplaceAllString(c.Image, "")
@ -742,54 +823,55 @@ func TestGetImpactedUntaggedOneImage(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Annotations: map[string]string{},
Annotations: map[string]string{}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world",
Image: "gcr.io/v2-namespace/hello-world",
},
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
v1beta1.Deployment{ apps_v1.DeploymentStatus{},
meta_v1.TypeMeta{}, },
meta_v1.ObjectMeta{ {
Name: "dep-2", meta_v1.TypeMeta{},
Namespace: "xxxx", meta_v1.ObjectMeta{
Annotations: map[string]string{}, Name: "dep-2",
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Namespace: "xxxx",
}, Annotations: map[string]string{},
v1beta1.DeploymentSpec{ Labels: map[string]string{types.KeelPolicyLabel: "all"},
Template: v1.PodTemplateSpec{ },
Spec: v1.PodSpec{ apps_v1.DeploymentSpec{
Containers: []v1.Container{ Template: v1.PodTemplateSpec{
v1.Container{ Spec: v1.PodSpec{
Image: "gcr.io/v2-namespace/hello-world:1.1.1", Containers: []v1.Container{
}, v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver()) provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }
@ -811,7 +893,7 @@ func TestGetImpactedUntaggedOneImage(t *testing.T) {
found := false found := false
for _, plan := range plans { for _, plan := range plans {
for _, c := range plan.Deployment.Spec.Template.Spec.Containers { for _, c := range plan.Resource.Containers() {
containerImageName := versionreg.ReplaceAllString(c.Image, "") containerImageName := versionreg.ReplaceAllString(c.Image, "")
@ -839,37 +921,39 @@ func TestTrackedImages(t *testing.T) {
}, },
}, },
} }
fp.deploymentList = &v1beta1.DeploymentList{ deps := []*apps_v1.Deployment{
Items: []v1beta1.Deployment{ {
v1beta1.Deployment{ meta_v1.TypeMeta{},
meta_v1.TypeMeta{}, meta_v1.ObjectMeta{
meta_v1.ObjectMeta{ Name: "dep-1",
Name: "dep-1", Namespace: "xxxx",
Namespace: "xxxx", Labels: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, },
}, apps_v1.DeploymentSpec{
v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{
Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{
Spec: v1.PodSpec{ Containers: []v1.Container{
Containers: []v1.Container{ v1.Container{
v1.Container{ Image: "gcr.io/v2-namespace/hello-world:1.1",
Image: "gcr.io/v2-namespace/hello-world:1.1",
},
}, },
ImagePullSecrets: []v1.LocalObjectReference{ },
v1.LocalObjectReference{ ImagePullSecrets: []v1.LocalObjectReference{
Name: "very-secret", v1.LocalObjectReference{
}, Name: "very-secret",
}, },
}, },
}, },
}, },
v1beta1.DeploymentStatus{},
}, },
apps_v1.DeploymentStatus{},
}, },
} }
provider, err := NewProvider(fp, &fakeSender{}, approver()) grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil { if err != nil {
t.Fatalf("failed to get provider: %s", err) t.Fatalf("failed to get provider: %s", err)
} }


@ -2,17 +2,16 @@ package kubernetes
import ( import (
"fmt" "fmt"
"time"
"k8s.io/api/extensions/v1beta1" "github.com/keel-hq/keel/internal/k8s"
"github.com/keel-hq/keel/types" "github.com/keel-hq/keel/types"
"github.com/keel-hq/keel/util/image" "github.com/keel-hq/keel/util/image"
"github.com/keel-hq/keel/util/timeutil"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func (p *Provider) checkUnversionedDeployment(policy types.PolicyType, repo *types.Repository, deployment v1beta1.Deployment) (updatePlan *UpdatePlan, shouldUpdateDeployment bool, err error) { func (p *Provider) checkUnversionedDeployment(policy types.PolicyType, repo *types.Repository, resource *k8s.GenericResource) (updatePlan *UpdatePlan, shouldUpdateDeployment bool, err error) {
updatePlan = &UpdatePlan{} updatePlan = &UpdatePlan{}
eventRepoRef, err := image.Parse(repo.String()) eventRepoRef, err := image.Parse(repo.String())
@ -20,22 +19,21 @@ func (p *Provider) checkUnversionedDeployment(policy types.PolicyType, repo *typ
return return
} }
labels := deployment.GetLabels() labels := resource.GetLabels()
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"labels": labels, "labels": labels,
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"kind": resource.Kind(),
"policy": policy, "policy": policy,
}).Info("provider.kubernetes.checkVersionedDeployment: keel policy found, checking deployment...") }).Info("provider.kubernetes.checkVersionedDeployment: keel policy found, checking resource...")
annotations := deployment.GetAnnotations() annotations := resource.GetAnnotations()
shouldUpdateDeployment = false shouldUpdateDeployment = false
for idx, c := range deployment.Spec.Template.Spec.Containers { // for idx, c := range deployment.Spec.Template.Spec.Containers {
// Remove version if any for idx, c := range resource.Containers() {
// containerImageName := versionreg.ReplaceAllString(c.Image, "")
containerImageRef, err := image.Parse(c.Image) containerImageRef, err := image.Parse(c.Image)
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
@ -46,8 +44,9 @@ func (p *Provider) checkUnversionedDeployment(policy types.PolicyType, repo *typ
} }
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"kind": resource.Kind(),
"parsed_image_name": containerImageRef.Remote(), "parsed_image_name": containerImageRef.Remote(),
"target_image_name": repo.Name, "target_image_name": repo.Name,
"target_tag": repo.Tag, "target_tag": repo.Tag,
@ -72,42 +71,31 @@ func (p *Provider) checkUnversionedDeployment(policy types.PolicyType, repo *typ
} }
// updating annotations // updating annotations
annotations := deployment.GetAnnotations() // annotations := resource.GetAnnotations()
if _, ok := annotations[types.KeelForceTagMatchLabel]; ok { matchTag, ok := annotations[types.KeelForceTagMatchLabel]
if containerImageRef.Tag() != eventRepoRef.Tag() { if ok {
if matchTag != "" && containerImageRef.Tag() != eventRepoRef.Tag() {
continue continue
} }
if deployment.Spec.Template.Annotations == nil {
deployment.Spec.Template.Annotations = map[string]string{}
}
deployment.Spec.Template.Annotations["time"] = timeutil.Now().String()
} }
// updating spec template annotations
specAnnotations := resource.GetSpecAnnotations()
specAnnotations[types.KeelUpdateTimeAnnotation] = time.Now().String()
resource.SetSpecAnnotations(specAnnotations)
// updating image // updating image
if containerImageRef.Registry() == image.DefaultRegistryHostname { if containerImageRef.Registry() == image.DefaultRegistryHostname {
c.Image = fmt.Sprintf("%s:%s", containerImageRef.ShortName(), repo.Tag) resource.UpdateContainer(idx, fmt.Sprintf("%s:%s", containerImageRef.ShortName(), repo.Tag))
} else { } else {
c.Image = fmt.Sprintf("%s:%s", containerImageRef.Repository(), repo.Tag) resource.UpdateContainer(idx, fmt.Sprintf("%s:%s", containerImageRef.Repository(), repo.Tag))
} }
deployment.Spec.Template.Spec.Containers[idx] = c
// marking this deployment for update
shouldUpdateDeployment = true shouldUpdateDeployment = true
// updating digest if available
if repo.Digest != "" {
// annotations[types.KeelDigestAnnotation+"/"+containerImageRef.Remote()] = repo.Digest
}
// adding image for updates
annotations = addImageToPull(annotations, c.Image)
deployment.SetAnnotations(annotations)
updatePlan.CurrentVersion = containerImageRef.Tag() updatePlan.CurrentVersion = containerImageRef.Tag()
updatePlan.NewVersion = repo.Tag updatePlan.NewVersion = repo.Tag
updatePlan.Deployment = deployment updatePlan.Resource = resource
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"parsed_image": containerImageRef.Remote(), "parsed_image": containerImageRef.Remote(),


@ -7,10 +7,13 @@ import (
"github.com/keel-hq/keel/approvals" "github.com/keel-hq/keel/approvals"
"github.com/keel-hq/keel/extension/notification" "github.com/keel-hq/keel/extension/notification"
"github.com/keel-hq/keel/internal/k8s"
"github.com/keel-hq/keel/types" "github.com/keel-hq/keel/types"
"github.com/keel-hq/keel/util/timeutil" "github.com/keel-hq/keel/util/timeutil"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1" // "k8s.io/api/extensions/apps_v1"
apps_v1 "k8s.io/api/apps/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
@ -29,9 +32,9 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
stop chan struct{} stop chan struct{}
} }
type args struct { type args struct {
policy types.PolicyType policy types.PolicyType
repo *types.Repository repo *types.Repository
deployment v1beta1.Deployment resource *k8s.GenericResource
} }
tests := []struct { tests := []struct {
name string name string
@ -46,7 +49,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -54,8 +57,13 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -65,20 +73,25 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
Namespace: "xxxx", Namespace: "xxxx",
Annotations: map[string]string{forceUpdateImageAnnotation: "gcr.io/v2-namespace/hello-world:latest"}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -88,8 +101,8 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "latest", NewVersion: "latest",
CurrentVersion: "latest", CurrentVersion: "latest",
}, },
@ -101,7 +114,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -109,7 +122,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
@ -120,11 +133,12 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{}, // Resource: &k8s.GenericResource{},
Resource: nil,
}, },
wantShouldUpdateDeployment: false, wantShouldUpdateDeployment: false,
wantErr: false, wantErr: false,
@ -134,7 +148,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "master"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "master"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -146,7 +160,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
types.KeelPolicyLabel: "all", types.KeelPolicyLabel: "all",
}, },
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
@ -157,11 +171,11 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{}, Resource: nil,
}, },
wantShouldUpdateDeployment: false, wantShouldUpdateDeployment: false,
wantErr: false, wantErr: false,
@ -171,7 +185,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "karolisr/keel", Tag: "0.2.0"}, repo: &types.Repository{Name: "karolisr/keel", Tag: "0.2.0"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -179,8 +193,13 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -190,20 +209,25 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
Namespace: "xxxx", Namespace: "xxxx",
Annotations: map[string]string{forceUpdateImageAnnotation: "karolisr/keel:0.2.0"}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -213,8 +237,8 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "0.2.0", NewVersion: "0.2.0",
CurrentVersion: "latest", CurrentVersion: "latest",
}, },
@ -226,7 +250,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "karolisr/keel", Tag: "master"}, repo: &types.Repository{Name: "karolisr/keel", Tag: "master"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -234,8 +258,13 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule}, Annotations: map[string]string{types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule},
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -245,23 +274,27 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
Namespace: "xxxx", Namespace: "xxxx",
Annotations: map[string]string{ Annotations: map[string]string{
types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule, types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule,
forceUpdateImageAnnotation: "karolisr/keel:master",
}, },
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -271,8 +304,8 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "master", NewVersion: "master",
CurrentVersion: "master", CurrentVersion: "master",
}, },
@ -285,7 +318,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "karolisr/keel", Tag: "latest-staging"}, repo: &types.Repository{Name: "karolisr/keel", Tag: "latest-staging"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -296,8 +329,13 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
types.KeelForceTagMatchLabel: "yup", types.KeelForceTagMatchLabel: "yup",
}, },
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -307,11 +345,11 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -319,15 +357,14 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{ Annotations: map[string]string{
types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule, types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule,
types.KeelForceTagMatchLabel: "yup", types.KeelForceTagMatchLabel: "yup",
forceUpdateImageAnnotation: "karolisr/keel:latest-staging",
}, },
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{ ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{ Annotations: map[string]string{
"time": timeutil.Now().String(), "this": "that",
}, },
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
@ -339,8 +376,8 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "latest-staging", NewVersion: "latest-staging",
CurrentVersion: "latest-staging", CurrentVersion: "latest-staging",
}, },
@ -353,7 +390,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Host: "eu.gcr.io", Name: "karolisr/keel", Tag: "latest-staging"}, repo: &types.Repository{Host: "eu.gcr.io", Name: "karolisr/keel", Tag: "latest-staging"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -364,7 +401,7 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
types.KeelForceTagMatchLabel: "yup", types.KeelForceTagMatchLabel: "yup",
}, },
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{ ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{ Annotations: map[string]string{
@ -380,11 +417,11 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -392,16 +429,15 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{ Annotations: map[string]string{
types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule, types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule,
types.KeelForceTagMatchLabel: "yup", types.KeelForceTagMatchLabel: "yup",
forceUpdateImageAnnotation: "eu.gcr.io/karolisr/keel:latest-staging",
}, },
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{ ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{ Annotations: map[string]string{
"this": "that", "this": "that",
"time": timeutil.Now().String(), // "time": timeutil.Now().String(),
}, },
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
@ -413,21 +449,20 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "latest-staging", NewVersion: "latest-staging",
CurrentVersion: "latest-staging", CurrentVersion: "latest-staging",
}, },
wantShouldUpdateDeployment: true, wantShouldUpdateDeployment: true,
wantErr: false, wantErr: false,
}, },
{ {
name: "poll trigger, force-match, different tag", name: "poll trigger, force-match, different tag",
args: args{ args: args{
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "karolisr/keel", Tag: "latest-staging"}, repo: &types.Repository{Name: "karolisr/keel", Tag: "latest-staging"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -435,8 +470,9 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
Annotations: map[string]string{types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule}, Annotations: map[string]string{types.KeelPollScheduleAnnotation: types.KeelPollDefaultSchedule},
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -446,11 +482,11 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{}, Resource: nil,
}, },
wantShouldUpdateDeployment: false, wantShouldUpdateDeployment: false,
wantErr: false, wantErr: false,
@ -465,11 +501,23 @@ func TestProvider_checkUnversionedDeployment(t *testing.T) {
events: tt.fields.events, events: tt.fields.events,
stop: tt.fields.stop, stop: tt.fields.stop,
} }
gotUpdatePlan, gotShouldUpdateDeployment, err := p.checkUnversionedDeployment(tt.args.policy, tt.args.repo, tt.args.deployment) gotUpdatePlan, gotShouldUpdateDeployment, err := p.checkUnversionedDeployment(tt.args.policy, tt.args.repo, tt.args.resource)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("Provider.checkUnversionedDeployment() error = %#v, wantErr %#v", err, tt.wantErr) t.Errorf("Provider.checkUnversionedDeployment() error = %#v, wantErr %#v", err, tt.wantErr)
return return
} }
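// the keel.sh/update-time annotation is stamped with a fresh time.Now() value
// on every update, so it is removed before the deep-equal comparison below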
if gotShouldUpdateDeployment {
ann := gotUpdatePlan.Resource.GetSpecAnnotations()
if ann[types.KeelUpdateTimeAnnotation] != "" {
delete(ann, types.KeelUpdateTimeAnnotation)
gotUpdatePlan.Resource.SetSpecAnnotations(ann)
} else {
t.Errorf("Provider.checkUnversionedDeployment() missing types.KeelUpdateTimeAnnotation annotation")
}
}
if !reflect.DeepEqual(gotUpdatePlan, tt.wantUpdatePlan) { if !reflect.DeepEqual(gotUpdatePlan, tt.wantUpdatePlan) {
t.Errorf("Provider.checkUnversionedDeployment() gotUpdatePlan = %#v, want %#v", gotUpdatePlan, tt.wantUpdatePlan) t.Errorf("Provider.checkUnversionedDeployment() gotUpdatePlan = %#v, want %#v", gotUpdatePlan, tt.wantUpdatePlan)
} }

View File

@ -2,14 +2,13 @@ package kubernetes
import ( import (
"fmt" "fmt"
"time"
// "k8s.io/api/core/v1" // "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
// "k8s.io/api/extensions/v1beta1" // "k8s.io/api/extensions/v1beta1"
"k8s.io/api/extensions/v1beta1"
"github.com/keel-hq/keel/internal/k8s"
"github.com/keel-hq/keel/types" "github.com/keel-hq/keel/types"
"github.com/keel-hq/keel/util/image" "github.com/keel-hq/keel/util/image"
@ -19,7 +18,7 @@ import (
) )
// func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy types.PolicyType, repo *types.Repository, deployment v1beta1.Deployment) (updated v1beta1.Deployment, shouldUpdateDeployment bool, err error) { // func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy types.PolicyType, repo *types.Repository, deployment v1beta1.Deployment) (updated v1beta1.Deployment, shouldUpdateDeployment bool, err error) {
func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy types.PolicyType, repo *types.Repository, deployment v1beta1.Deployment) (updatePlan *UpdatePlan, shouldUpdateDeployment bool, err error) { func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy types.PolicyType, repo *types.Repository, resource *k8s.GenericResource) (updatePlan *UpdatePlan, shouldUpdateDeployment bool, err error) {
updatePlan = &UpdatePlan{} updatePlan = &UpdatePlan{}
eventRepoRef, err := image.Parse(repo.Name) eventRepoRef, err := image.Parse(repo.Name)
@ -27,22 +26,24 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
return return
} }
labels := deployment.GetLabels() labels := resource.GetLabels()
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"labels": labels, "labels": labels,
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"kind": resource.Kind(),
"policy": policy, "policy": policy,
}).Info("provider.kubernetes.checkVersionedDeployment: keel policy found, checking deployment...") }).Info("provider.kubernetes.checkVersionedDeployment: keel policy found, checking resource...")
shouldUpdateDeployment = false shouldUpdateDeployment = false
for idx, c := range deployment.Spec.Template.Spec.Containers { // for idx, c := range deployment.Spec.Template.Spec.Containers {
for idx, c := range resource.Containers() {
// Remove version if any // Remove version if any
// containerImageName := versionreg.ReplaceAllString(c.Image, "") // containerImageName := versionreg.ReplaceAllString(c.Image, "")
conatinerImageRef, err := image.Parse(c.Image) containerImageRef, err := image.Parse(c.Image)
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"error": err, "error": err,
@ -52,18 +53,19 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
} }
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"parsed_image_name": conatinerImageRef.Remote(), "parsed_image_name": containerImageRef.Remote(),
"kind": resource.Kind(),
"target_image_name": repo.Name, "target_image_name": repo.Name,
"target_tag": repo.Tag, "target_tag": repo.Tag,
"policy": policy, "policy": policy,
"image": c.Image, "image": c.Image,
}).Info("provider.kubernetes: checking image") }).Info("provider.kubernetes: checking image")
if conatinerImageRef.Repository() != eventRepoRef.Repository() { if containerImageRef.Repository() != eventRepoRef.Repository() {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"parsed_image_name": conatinerImageRef.Remote(), "parsed_image_name": containerImageRef.Remote(),
"target_image_name": repo.Name, "target_image_name": repo.Name,
}).Info("provider.kubernetes: images do not match, ignoring") }).Info("provider.kubernetes: images do not match, ignoring")
continue continue
@ -71,33 +73,26 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
// if policy is force, don't bother with version checking // if policy is force, don't bother with version checking
// same with `latest` images, update them to versioned ones // same with `latest` images, update them to versioned ones
if policy == types.PolicyTypeForce || conatinerImageRef.Tag() == "latest" { if policy == types.PolicyTypeForce || containerImageRef.Tag() == "latest" {
c = updateContainer(c, conatinerImageRef, newVersion.String()) if containerImageRef.Registry() == image.DefaultRegistryHostname {
resource.UpdateContainer(idx, fmt.Sprintf("%s:%s", containerImageRef.ShortName(), newVersion.String()))
deployment.Spec.Template.Spec.Containers[idx] = c } else {
resource.UpdateContainer(idx, fmt.Sprintf("%s:%s", containerImageRef.Repository(), newVersion.String()))
// marking this deployment for update
shouldUpdateDeployment = true
// updating digest if available
annotations := deployment.GetAnnotations()
if repo.Digest != "" {
// annotations[types.KeelDigestAnnotation+"/"+conatinerImageRef.Remote()] = repo.Digest
} }
annotations = addImageToPull(annotations, c.Image) shouldUpdateDeployment = true
setUpdateTime(resource)
deployment.SetAnnotations(annotations)
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"parsed_image": conatinerImageRef.Remote(), "parsed_image": containerImageRef.Remote(),
"raw_image_name": c.Image, "raw_image_name": c.Image,
"target_image": repo.Name, "target_image": repo.Name,
"target_image_tag": repo.Tag, "target_image_tag": repo.Tag,
"policy": policy, "policy": policy,
}).Info("provider.kubernetes: impacted deployment container found") }).Info("provider.kubernetes: impacted deployment container found")
updatePlan.CurrentVersion = conatinerImageRef.Tag() updatePlan.CurrentVersion = containerImageRef.Tag()
updatePlan.NewVersion = newVersion.Original updatePlan.NewVersion = newVersion.Original
updatePlan.Deployment = deployment updatePlan.Resource = resource
// success, moving to next container // success, moving to next container
continue continue
@ -108,7 +103,7 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"error": err, "error": err,
"container_image": c.Image, "container_image": c.Image,
"container_image_tag": conatinerImageRef.Tag(), "container_image_tag": containerImageRef.Tag(),
"keel_policy": policy, "keel_policy": policy,
}).Error("provider.kubernetes: failed to get image version, is it tagged as semver?") }).Error("provider.kubernetes: failed to get image version, is it tagged as semver?")
continue continue
@ -116,8 +111,8 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"labels": labels, "labels": labels,
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"image": c.Image, "image": c.Image,
"current_version": currentVersion.String(), "current_version": currentVersion.String(),
"policy": policy, "policy": policy,
@ -136,8 +131,8 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"labels": labels, "labels": labels,
"name": deployment.Name, "name": resource.Name,
"namespace": deployment.Namespace, "namespace": resource.Namespace,
"image": c.Image, "image": c.Image,
"current_version": currentVersion.String(), "current_version": currentVersion.String(),
"new_version": newVersion.String(), "new_version": newVersion.String(),
@ -146,25 +141,27 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
}).Info("provider.kubernetes: checked version, deciding whether to update") }).Info("provider.kubernetes: checked version, deciding whether to update")
if shouldUpdateContainer { if shouldUpdateContainer {
c = updateContainer(c, conatinerImageRef, newVersion.String())
deployment.Spec.Template.Spec.Containers[idx] = c // c = updateContainer(c, conatinerImageRef, newVersion.String())
if containerImageRef.Registry() == image.DefaultRegistryHostname {
resource.UpdateContainer(idx, fmt.Sprintf("%s:%s", containerImageRef.ShortName(), newVersion.String()))
} else {
resource.UpdateContainer(idx, fmt.Sprintf("%s:%s", containerImageRef.Repository(), newVersion.String()))
}
// deployment.Spec.Template.Spec.Containers[idx] = c
// marking this deployment for update // marking this deployment for update
shouldUpdateDeployment = true shouldUpdateDeployment = true
// updating annotations setUpdateTime(resource)
annotations := deployment.GetAnnotations()
// updating digest if available
if repo.Digest != "" {
// annotations[types.KeelDigestAnnotation+"/"+conatinerImageRef.Remote()] = repo.Digest
}
deployment.SetAnnotations(annotations)
updatePlan.CurrentVersion = currentVersion.Original updatePlan.CurrentVersion = currentVersion.Original
updatePlan.NewVersion = newVersion.Original updatePlan.NewVersion = newVersion.Original
updatePlan.Deployment = deployment updatePlan.Resource = resource
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"parsed_image": conatinerImageRef.Remote(), "parsed_image": containerImageRef.Remote(),
"raw_image_name": c.Image, "raw_image_name": c.Image,
"target_image": repo.Name, "target_image": repo.Name,
"target_image_tag": repo.Tag, "target_image_tag": repo.Tag,
@ -176,13 +173,8 @@ func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy ty
return updatePlan, shouldUpdateDeployment, nil return updatePlan, shouldUpdateDeployment, nil
} }
func updateContainer(container v1.Container, ref *image.Reference, version string) v1.Container { func setUpdateTime(resource *k8s.GenericResource) {
// updating image specAnnotations := resource.GetSpecAnnotations()
if ref.Registry() == image.DefaultRegistryHostname { specAnnotations[types.KeelUpdateTimeAnnotation] = time.Now().String()
container.Image = fmt.Sprintf("%s:%s", ref.ShortName(), version) resource.SetSpecAnnotations(specAnnotations)
} else {
container.Image = fmt.Sprintf("%s:%s", ref.Repository(), version)
}
return container
} }
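The registry-aware rename above keeps Docker Hub images on their short name and other registries on their full repository path. A minimal, self-contained sketch of that branch follows; the reference struct and the default-registry value are illustrative stand-ins, not keel's actual util/image package:

package main

import "fmt"

// reference is an illustrative stand-in for keel's image.Reference.
type reference struct {
	registry   string // e.g. "index.docker.io" or "gcr.io"
	repository string // full repository, e.g. "gcr.io/v2-namespace/hello-world"
	shortName  string // Docker Hub short form, e.g. "karolisr/keel"
}

// defaultRegistryHostname approximates image.DefaultRegistryHostname (assumed value).
const defaultRegistryHostname = "index.docker.io"

// newImageName mirrors the branch above: Docker Hub images keep the short
// name, any other registry keeps the full repository path.
func newImageName(ref reference, version string) string {
	if ref.registry == defaultRegistryHostname {
		return fmt.Sprintf("%s:%s", ref.shortName, version)
	}
	return fmt.Sprintf("%s:%s", ref.repository, version)
}

func main() {
	hub := reference{registry: "index.docker.io", repository: "index.docker.io/karolisr/keel", shortName: "karolisr/keel"}
	gcr := reference{registry: "gcr.io", repository: "gcr.io/v2-namespace/hello-world", shortName: "v2-namespace/hello-world"}
	fmt.Println(newImageName(hub, "0.9.0")) // karolisr/keel:0.9.0
	fmt.Println(newImageName(gcr, "1.1.2")) // gcr.io/v2-namespace/hello-world:1.1.2
}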

View File

@ -6,13 +6,14 @@ import (
"github.com/keel-hq/keel/approvals" "github.com/keel-hq/keel/approvals"
"github.com/keel-hq/keel/extension/notification" "github.com/keel-hq/keel/extension/notification"
"github.com/keel-hq/keel/internal/k8s"
"github.com/keel-hq/keel/types" "github.com/keel-hq/keel/types"
"github.com/keel-hq/keel/util/version" "github.com/keel-hq/keel/util/version"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apps_v1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
) )
func unsafeGetVersion(ver string) *types.Version { func unsafeGetVersion(ver string) *types.Version {
@ -35,7 +36,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
newVersion *types.Version newVersion *types.Version
policy types.PolicyType policy types.PolicyType
repo *types.Repository repo *types.Repository
deployment v1beta1.Deployment resource *k8s.GenericResource
} }
tests := []struct { tests := []struct {
name string name string
@ -51,7 +52,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
newVersion: unsafeGetVersion("1.1.2"), newVersion: unsafeGetVersion("1.1.2"),
policy: types.PolicyTypeAll, policy: types.PolicyTypeAll,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -59,8 +60,13 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -70,11 +76,11 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -82,8 +88,13 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -93,8 +104,8 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "1.1.2", NewVersion: "1.1.2",
CurrentVersion: "1.1.1", CurrentVersion: "1.1.1",
}, },
@ -107,7 +118,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
newVersion: unsafeGetVersion("1.1.1"), newVersion: unsafeGetVersion("1.1.1"),
policy: types.PolicyTypeAll, policy: types.PolicyTypeAll,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.1"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.1"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -115,7 +126,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
@ -126,11 +137,11 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{}, Resource: nil,
NewVersion: "", NewVersion: "",
CurrentVersion: "", CurrentVersion: "",
}, },
@ -143,7 +154,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
newVersion: unsafeGetVersion("1.1.2"), newVersion: unsafeGetVersion("1.1.2"),
policy: types.PolicyTypeAll, policy: types.PolicyTypeAll,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -151,8 +162,13 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -165,11 +181,11 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -177,8 +193,13 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "all"}, Labels: map[string]string{types.KeelPolicyLabel: "all"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -191,8 +212,8 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "1.1.2", NewVersion: "1.1.2",
CurrentVersion: "1.1.1", CurrentVersion: "1.1.1",
}, },
@ -205,7 +226,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
newVersion: unsafeGetVersion("1.1.2"), newVersion: unsafeGetVersion("1.1.2"),
policy: types.PolicyTypeForce, policy: types.PolicyTypeForce,
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"}, repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"},
deployment: v1beta1.Deployment{ resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
@ -213,8 +234,13 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
Annotations: map[string]string{}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -227,20 +253,25 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
}, },
wantUpdatePlan: &UpdatePlan{ wantUpdatePlan: &UpdatePlan{
Deployment: v1beta1.Deployment{ Resource: MustParseGR(&apps_v1.Deployment{
meta_v1.TypeMeta{}, meta_v1.TypeMeta{},
meta_v1.ObjectMeta{ meta_v1.ObjectMeta{
Name: "dep-1", Name: "dep-1",
Namespace: "xxxx", Namespace: "xxxx",
Annotations: map[string]string{forceUpdateImageAnnotation: "gcr.io/v2-namespace/hello-world:1.1.2"}, Annotations: map[string]string{},
Labels: map[string]string{types.KeelPolicyLabel: "force"}, Labels: map[string]string{types.KeelPolicyLabel: "force"},
}, },
v1beta1.DeploymentSpec{ apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Annotations: map[string]string{
"this": "that",
},
},
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
v1.Container{ v1.Container{
@ -253,8 +284,8 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
}, },
}, },
}, },
v1beta1.DeploymentStatus{}, apps_v1.DeploymentStatus{},
}, }),
NewVersion: "1.1.2", NewVersion: "1.1.2",
CurrentVersion: "latest", CurrentVersion: "latest",
}, },
@ -262,6 +293,7 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
wantErr: false, wantErr: false,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
p := &Provider{ p := &Provider{
@ -271,11 +303,23 @@ func TestProvider_checkVersionedDeployment(t *testing.T) {
events: tt.fields.events, events: tt.fields.events,
stop: tt.fields.stop, stop: tt.fields.stop,
} }
gotUpdatePlan, gotShouldUpdateDeployment, err := p.checkVersionedDeployment(tt.args.newVersion, tt.args.policy, tt.args.repo, tt.args.deployment) gotUpdatePlan, gotShouldUpdateDeployment, err := p.checkVersionedDeployment(tt.args.newVersion, tt.args.policy, tt.args.repo, tt.args.resource)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("Provider.checkVersionedDeployment() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("Provider.checkVersionedDeployment() error = %v, wantErr %v", err, tt.wantErr)
return return
} }
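// the update-time annotation holds a fresh time.Now() value, so drop it
// before comparing the plan against the static expected fixture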
if gotShouldUpdateDeployment {
ann := gotUpdatePlan.Resource.GetSpecAnnotations()
_, ok := ann[types.KeelUpdateTimeAnnotation]
if ok {
delete(ann, types.KeelUpdateTimeAnnotation)
gotUpdatePlan.Resource.SetSpecAnnotations(ann)
} else {
t.Errorf("Provider.checkVersionedDeployment() missing types.KeelUpdateTimeAnnotation annotation")
}
}
if !reflect.DeepEqual(gotUpdatePlan, tt.wantUpdatePlan) { if !reflect.DeepEqual(gotUpdatePlan, tt.wantUpdatePlan) {
t.Errorf("Provider.checkVersionedDeployment() gotUpdatePlan = %v, want %v", gotUpdatePlan, tt.wantUpdatePlan) t.Errorf("Provider.checkVersionedDeployment() gotUpdatePlan = %v, want %v", gotUpdatePlan, tt.wantUpdatePlan)
} }

View File

@ -25,6 +25,8 @@ func mustEncode(data string) string {
return base64.StdEncoding.EncodeToString([]byte(data)) return base64.StdEncoding.EncodeToString([]byte(data))
} }
var secretDockerConfigJSONPayloadWithUsernamePassword = `{"auths":{"https://index.docker.io/v1/":{"username":"login","password":"somepass","email":"email@email.com","auth":"longbase64secret"}}}`
func TestGetSecret(t *testing.T) { func TestGetSecret(t *testing.T) {
imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11") imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11")
@ -92,6 +94,39 @@ func TestGetDockerConfigJSONSecret(t *testing.T) {
t.Errorf("unexpected pass: %s", creds.Password) t.Errorf("unexpected pass: %s", creds.Password)
} }
} }
func TestGetDockerConfigJSONSecretUsernamePassword(t *testing.T) {
imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11")
impl := &testutil.FakeK8sImplementer{
AvailableSecret: &v1.Secret{
Data: map[string][]byte{
dockerConfigJSONKey: []byte(secretDockerConfigJSONPayloadWithUsernamePassword),
},
Type: v1.SecretTypeDockerConfigJson,
},
}
getter := NewGetter(impl)
trackedImage := &types.TrackedImage{
Image: imgRef,
Namespace: "default",
Secrets: []string{"myregistrysecret"},
}
creds, err := getter.Get(trackedImage)
if err != nil {
t.Errorf("failed to get creds: %s", err)
}
if creds.Username != "login" {
t.Errorf("unexpected username: %s", creds.Username)
}
if creds.Password != "somepass" {
t.Errorf("unexpected pass: %s", creds.Password)
}
}
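The new test exercises plain username/password entries in a kubernetes.io/dockerconfigjson secret. For reference, the payload shape it parses can be sketched with the standard library alone; the struct below is illustrative and not keel's secrets getter:

package main

import (
	"encoding/json"
	"fmt"
)

// dockerConfigJSON models the payload stored under the .dockerconfigjson key
// of a kubernetes.io/dockerconfigjson secret.
type dockerConfigJSON struct {
	Auths map[string]struct {
		Username string `json:"username"`
		Password string `json:"password"`
		Email    string `json:"email"`
		Auth     string `json:"auth"`
	} `json:"auths"`
}

func main() {
	payload := `{"auths":{"https://index.docker.io/v1/":{"username":"login","password":"somepass","email":"email@email.com","auth":"longbase64secret"}}}`

	var cfg dockerConfigJSON
	if err := json.Unmarshal([]byte(payload), &cfg); err != nil {
		panic(err)
	}
	for registry, entry := range cfg.Auths {
		fmt.Printf("%s -> username=%q password=%q\n", registry, entry.Username, entry.Password)
	}
}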
func TestGetSecretNotFound(t *testing.T) { func TestGetSecretNotFound(t *testing.T) {
imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11") imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11")

View File

@ -25,6 +25,7 @@ const KeelPolicyLabel = "keel.sh/policy"
// changes // changes
const KeelTriggerLabel = "keel.sh/trigger" const KeelTriggerLabel = "keel.sh/trigger"
// KeelForceTagMatchLabel - label that restricts force updates to events whose tag matches the current image tag
const KeelForceTagMatchLabel = "keel.sh/match-tag" const KeelForceTagMatchLabel = "keel.sh/match-tag"
// KeelPollScheduleAnnotation - optional variable to setup custom schedule for polling, defaults to @every 10m // KeelPollScheduleAnnotation - optional variable to setup custom schedule for polling, defaults to @every 10m
@ -43,6 +44,9 @@ const KeelNotificationChanAnnotation = "keel.sh/notify"
// KeelMinimumApprovalsLabel - min approvals // KeelMinimumApprovalsLabel - min approvals
const KeelMinimumApprovalsLabel = "keel.sh/approvals" const KeelMinimumApprovalsLabel = "keel.sh/approvals"
// KeelUpdateTimeAnnotation - annotation written to the pod template spec to record when Keel last updated the resource
const KeelUpdateTimeAnnotation = "keel.sh/update-time"
// KeelApprovalDeadlineLabel - approval deadline // KeelApprovalDeadlineLabel - approval deadline
const KeelApprovalDeadlineLabel = "keel.sh/approvalDeadline" const KeelApprovalDeadlineLabel = "keel.sh/approvalDeadline"
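The update-time annotation introduced above is written onto the pod template spec (see setUpdateTime in the provider change), so every update produces a template change that the Deployment controller rolls out. A hedged illustration against a plain apps/v1 Deployment; the helper name is a stand-in, not keel's code:

package main

import (
	"fmt"
	"time"

	apps_v1 "k8s.io/api/apps/v1"
)

// keelUpdateTimeAnnotation mirrors types.KeelUpdateTimeAnnotation.
const keelUpdateTimeAnnotation = "keel.sh/update-time"

// stampUpdateTime is an illustrative stand-in for setUpdateTime: writing the
// timestamp into the pod template metadata changes the template, which makes
// the Deployment controller roll the pods.
func stampUpdateTime(d *apps_v1.Deployment) {
	ann := d.Spec.Template.ObjectMeta.Annotations
	if ann == nil {
		ann = map[string]string{}
	}
	ann[keelUpdateTimeAnnotation] = time.Now().String()
	d.Spec.Template.ObjectMeta.Annotations = ann
}

func main() {
	d := &apps_v1.Deployment{}
	stampUpdateTime(d)
	fmt.Println(d.Spec.Template.ObjectMeta.Annotations[keelUpdateTimeAnnotation])
}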

View File

@ -1,8 +1,10 @@
package testing package testing
import ( import (
"github.com/keel-hq/keel/internal/k8s"
apps_v1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
core_v1 "k8s.io/client-go/kubernetes/typed/core/v1" core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
) )
@ -10,11 +12,11 @@ import (
// FakeK8sImplementer - fake implementer used for testing // FakeK8sImplementer - fake implementer used for testing
type FakeK8sImplementer struct { type FakeK8sImplementer struct {
NamespacesList *v1.NamespaceList NamespacesList *v1.NamespaceList
DeploymentSingle *v1beta1.Deployment DeploymentSingle *apps_v1.Deployment
DeploymentList *v1beta1.DeploymentList DeploymentList *apps_v1.DeploymentList
// stores value of an updated deployment // stores value of an updated deployment
Updated *v1beta1.Deployment Updated *k8s.GenericResource
AvailableSecret *v1.Secret AvailableSecret *v1.Secret
@ -31,18 +33,18 @@ func (i *FakeK8sImplementer) Namespaces() (*v1.NamespaceList, error) {
} }
// Deployment - available deployment, doesn't filter anything // Deployment - available deployment, doesn't filter anything
func (i *FakeK8sImplementer) Deployment(namespace, name string) (*v1beta1.Deployment, error) { func (i *FakeK8sImplementer) Deployment(namespace, name string) (*apps_v1.Deployment, error) {
return i.DeploymentSingle, nil return i.DeploymentSingle, nil
} }
// Deployments - available deployments // Deployments - available deployments
func (i *FakeK8sImplementer) Deployments(namespace string) (*v1beta1.DeploymentList, error) { func (i *FakeK8sImplementer) Deployments(namespace string) (*apps_v1.DeploymentList, error) {
return i.DeploymentList, nil return i.DeploymentList, nil
} }
// Update - update deployment // Update - update deployment
func (i *FakeK8sImplementer) Update(deployment *v1beta1.Deployment) error { func (i *FakeK8sImplementer) Update(obj *k8s.GenericResource) error {
i.Updated = deployment i.Updated = obj
return nil return nil
} }

15
vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
View File

@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

152
vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
View File

@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}

38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into a one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

341
vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
View File

@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}

306
vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
View File

@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Print.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
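For reference, a minimal sketch of driving a dedicated ConfigState instead of the shared spew.Config global; the map contents and field choices here are illustrative only, not taken from this repository.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// SortKeys makes map output deterministic, which is handy when diffing dumps.
	cfg := spew.ConfigState{Indent: "\t", SortKeys: true}
	m := map[string]int{"b": 2, "a": 1}
	fmt.Print(cfg.Sdump(m))
}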

211
vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file

@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported, with other types sorted according to the
reflect.Value.String() output, which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
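To ground the quick-start description above, a small self-contained sketch of the Dump style; the project/owner types are invented for illustration.

package main

import "github.com/davecgh/go-spew/spew"

type owner struct {
	Name string
}

type project struct {
	ID    int
	Owner *owner
	Tags  []string
}

func main() {
	p := project{ID: 7, Owner: &owner{Name: "keel"}, Tags: []string{"ci", "deploy"}}
	// Dump prints each field with its type, lengths/capacities and pointer addresses.
	spew.Dump(p)
}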

509
vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file

@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
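Since Fdump takes any io.Writer, the output can also be captured rather than printed; a sketch of buffering it (the byte-slice sample is illustrative).

package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	var buf bytes.Buffer
	// Byte slices are rendered in hexdump -C style, as described above.
	spew.Fdump(&buf, []byte{0x10, 0x20, 0x30})
	fmt.Print(buf.String())
}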

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound == true:
f.fs.Write(nilAngleBytes)
case cycleFound == true:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
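A minimal sketch of feeding a NewFormatter value straight into the standard fmt package; the pair type is illustrative.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type pair struct {
	A, B int
}

func main() {
	p := &pair{A: 1, B: 2}
	// With %+v the custom formatter also shows the pointer address chain.
	fmt.Printf("%+v\n", spew.NewFormatter(p))
}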

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
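The package-level wrappers behave like their fmt counterparts with every argument pre-wrapped; a sketch assuming a hypothetical config map.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]string{"interval": "1m"}
	// Errorf builds an error whose message was formatted through spew.
	err := spew.Errorf("unexpected config: %+v", cfg)
	fmt.Println(err)
	fmt.Print(spew.Sprintln("raw value:", cfg))
}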

23
vendor/github.com/hashicorp/golang-lru/.gitignore generated vendored Normal file

@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

223
vendor/github.com/hashicorp/golang-lru/2q.go generated vendored Normal file

@ -0,0 +1,223 @@
package lru
import (
"fmt"
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
// to recently added entries that have only been accessed once.
Default2QRecentRatio = 0.25
// Default2QGhostEntries is the default ratio of ghost
// entries kept to track entries recently evicted
Default2QGhostEntries = 0.50
)
// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata overhead.
// The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache struct {
size int
recentSize int
recent simplelru.LRUCache
frequent simplelru.LRUCache
recentEvict simplelru.LRUCache
lock sync.RWMutex
}
// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q(size int) (*TwoQueueCache, error) {
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
}
// New2QParams creates a new TwoQueueCache using the provided
// parameter values.
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
if size <= 0 {
return nil, fmt.Errorf("invalid size")
}
if recentRatio < 0.0 || recentRatio > 1.0 {
return nil, fmt.Errorf("invalid recent ratio")
}
if ghostRatio < 0.0 || ghostRatio > 1.0 {
return nil, fmt.Errorf("invalid ghost ratio")
}
// Determine the sub-sizes
recentSize := int(float64(size) * recentRatio)
evictSize := int(float64(size) * ghostRatio)
// Allocate the LRUs
recent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
frequent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
recentEvict, err := simplelru.NewLRU(evictSize, nil)
if err != nil {
return nil, err
}
// Initialize the cache
c := &TwoQueueCache{
size: size,
recentSize: recentSize,
recent: recent,
frequent: frequent,
recentEvict: recentEvict,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if this is a frequent value
if val, ok := c.frequent.Get(key); ok {
return val, ok
}
// If the value is contained in recent, then we
// promote it to frequent
if val, ok := c.recent.Peek(key); ok {
c.recent.Remove(key)
c.frequent.Add(key, val)
return val, ok
}
// No hit
return nil, false
}
// Add adds a value to the cache.
func (c *TwoQueueCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is frequently used already,
// and just update the value
if c.frequent.Contains(key) {
c.frequent.Add(key, value)
return
}
// Check if the value is recently used, and promote
// the value into the frequent list
if c.recent.Contains(key) {
c.recent.Remove(key)
c.frequent.Add(key, value)
return
}
// If the value was recently evicted, add it to the
// frequently used list
if c.recentEvict.Contains(key) {
c.ensureSpace(true)
c.recentEvict.Remove(key)
c.frequent.Add(key, value)
return
}
// Add to the recently seen list
c.ensureSpace(false)
c.recent.Add(key, value)
return
}
// ensureSpace is used to ensure we have space in the cache
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
// If we have space, nothing to do
recentLen := c.recent.Len()
freqLen := c.frequent.Len()
if recentLen+freqLen < c.size {
return
}
// If the recent buffer is larger than
// the target, evict from there
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
k, _, _ := c.recent.RemoveOldest()
c.recentEvict.Add(k, nil)
return
}
// Remove from the frequent list otherwise
c.frequent.RemoveOldest()
}
// Len returns the number of items in the cache.
func (c *TwoQueueCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.recent.Len() + c.frequent.Len()
}
// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.frequent.Keys()
k2 := c.recent.Keys()
return append(k1, k2...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
if c.frequent.Remove(key) {
return
}
if c.recent.Remove(key) {
return
}
if c.recentEvict.Remove(key) {
return
}
}
// Purge is used to completely clear the cache.
func (c *TwoQueueCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.recent.Purge()
c.frequent.Purge()
c.recentEvict.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.frequent.Contains(key) || c.recent.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.frequent.Peek(key); ok {
return val, ok
}
return c.recent.Peek(key)
}
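A short usage sketch of TwoQueueCache; the key and value here are illustrative, not tied to how this repository uses the cache.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}
	cache.Add("digest", "sha256:abc123")
	if v, ok := cache.Get("digest"); ok {
		fmt.Println("hit:", v)
	}
	// Contains checks membership without updating recency or frequency.
	fmt.Println(cache.Contains("missing"))
}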

362
vendor/github.com/hashicorp/golang-lru/LICENSE generated vendored Normal file

@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

25
vendor/github.com/hashicorp/golang-lru/README.md generated vendored Normal file
View File

@ -0,0 +1,25 @@
golang-lru
==========
This provides the `lru` package, which implements a fixed-size,
thread-safe LRU cache. It is based on the cache in Groupcache.
Documentation
=============
Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
Example
=======
Using the LRU is very simple:
```go
l, _ := New(128)
for i := 0; i < 256; i++ {
l.Add(i, nil)
}
if l.Len() != 128 {
panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```
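For reference, the README snippet above is not a complete program; a minimal, self-contained version (assuming the package is imported from its vendored path github.com/hashicorp/golang-lru) could look like this:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// New returns a thread-safe LRU cache holding at most 128 entries.
	l, err := lru.New(128)
	if err != nil {
		panic(err)
	}
	// Insert more entries than the cache can hold; the oldest are evicted.
	for i := 0; i < 256; i++ {
		l.Add(i, nil)
	}
	fmt.Println(l.Len()) // prints 128
}
```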

257
vendor/github.com/hashicorp/golang-lru/arc.go generated vendored Normal file
View File

@ -0,0 +1,257 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that it tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache; computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q), which requires setting parameters.
type ARCCache struct {
size int // Size is the total capacity of the cache
p int // P is the dynamic preference towards T1 or T2
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
lock sync.RWMutex
}
// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
// Create the sub LRUs
b1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
b2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
// Initialize the ARC
c := &ARCCache{
size: size,
p: 0,
t1: t1,
b1: b1,
t2: t2,
b2: b2,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// If the value is contained in T1 (recent), then
// promote it to T2 (frequent)
if val, ok := c.t1.Peek(key); ok {
c.t1.Remove(key)
c.t2.Add(key, val)
return val, ok
}
// Check if the value is contained in T2 (frequent)
if val, ok := c.t2.Get(key); ok {
return val, ok
}
// No hit
return nil, false
}
// Add adds a value to the cache.
func (c *ARCCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is contained in T1 (recent), and potentially
// promote it to frequent T2
if c.t1.Contains(key) {
c.t1.Remove(key)
c.t2.Add(key, value)
return
}
// Check if the value is already in T2 (frequent) and update it
if c.t2.Contains(key) {
c.t2.Add(key, value)
return
}
// Check if this value was recently evicted as part of the
// recently used list
if c.b1.Contains(key) {
// T1 set is too small, increase P appropriately
delta := 1
b1Len := c.b1.Len()
b2Len := c.b2.Len()
if b2Len > b1Len {
delta = b2Len / b1Len
}
if c.p+delta >= c.size {
c.p = c.size
} else {
c.p += delta
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(false)
}
// Remove from B1
c.b1.Remove(key)
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
// Check if this value was recently evicted as part of the
// frequently used list
if c.b2.Contains(key) {
// T2 set is too small, decrease P appropriately
delta := 1
b1Len := c.b1.Len()
b2Len := c.b2.Len()
if b1Len > b2Len {
delta = b1Len / b2Len
}
if delta >= c.p {
c.p = 0
} else {
c.p -= delta
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(true)
}
// Remove from B2
c.b2.Remove(key)
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(false)
}
// Keep the size of the ghost buffers trim
if c.b1.Len() > c.size-c.p {
c.b1.RemoveOldest()
}
if c.b2.Len() > c.p {
c.b2.RemoveOldest()
}
// Add to the recently seen list
c.t1.Add(key, value)
return
}
// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache) replace(b2ContainsKey bool) {
t1Len := c.t1.Len()
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
k, _, ok := c.t1.RemoveOldest()
if ok {
c.b1.Add(k, nil)
}
} else {
k, _, ok := c.t2.RemoveOldest()
if ok {
c.b2.Add(k, nil)
}
}
}
// Len returns the number of cached entries
func (c *ARCCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.t1.Len() + c.t2.Len()
}
// Keys returns all the cached keys
func (c *ARCCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.t1.Keys()
k2 := c.t2.Keys()
return append(k1, k2...)
}
// Remove is used to purge a key from the cache
func (c *ARCCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
if c.t1.Remove(key) {
return
}
if c.t2.Remove(key) {
return
}
if c.b1.Remove(key) {
return
}
if c.b2.Remove(key) {
return
}
}
// Purge is used to clear the cache
func (c *ARCCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.t1.Purge()
c.t2.Purge()
c.b1.Purge()
c.b2.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.t1.Contains(key) || c.t2.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.t1.Peek(key); ok {
return val, ok
}
return c.t2.Peek(key)
}
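As a quick illustration of the ARC API defined above (a sketch only; the main package and keys below are illustrative, not part of the vendored file), promotion from the recent list T1 to the frequent list T2 happens on the second access to a key:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	arc, err := lru.NewARC(2)
	if err != nil {
		panic(err)
	}
	arc.Add("a", 1)
	arc.Add("b", 2)
	// Reading "a" moves it from T1 (recent) to T2 (frequent).
	if v, ok := arc.Get("a"); ok {
		fmt.Println("a =", v)
	}
	// Adding a third key forces an adaptive eviction from T1 or T2.
	arc.Add("c", 3)
	fmt.Println("len:", arc.Len(), "still contains b:", arc.Contains("b"))
}
```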

21
vendor/github.com/hashicorp/golang-lru/doc.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru

110
vendor/github.com/hashicorp/golang-lru/lru.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
lru simplelru.LRUCache
lock sync.RWMutex
}
// New creates an LRU of the given size.
func New(size int) (*Cache, error) {
return NewWithEvict(size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
if err != nil {
return nil, err
}
c := &Cache{
lru: lru,
}
return c, nil
}
// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
c.lock.Lock()
c.lru.Purge()
c.lock.Unlock()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) (evicted bool) {
c.lock.Lock()
defer c.lock.Unlock()
return c.lru.Add(key, value)
}
// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
return c.lru.Get(key)
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Contains(key)
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Peek(key)
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
c.lock.Lock()
defer c.lock.Unlock()
if c.lru.Contains(key) {
return true, false
}
evicted = c.lru.Add(key, value)
return false, evicted
}
// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
c.lock.Lock()
c.lru.Remove(key)
c.lock.Unlock()
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
c.lock.Lock()
c.lru.RemoveOldest()
c.lock.Unlock()
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Keys()
}
// Len returns the number of items in the cache.
func (c *Cache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Len()
}
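The wrapper above simply serializes access to a simplelru.LRU; here is a hedged sketch of the eviction callback and ContainsOrAdd behaviour (the main package below is illustrative, not part of the vendored code):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// The callback runs whenever an entry is evicted to make room.
	c, _ := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a" and triggers the callback

	// ContainsOrAdd reports whether the key was already present and
	// whether adding it caused an eviction.
	found, evicted := c.ContainsOrAdd("b", 20)
	fmt.Println(found, evicted) // true false
}
```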

161
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go generated vendored Normal file
View File

@ -0,0 +1,161 @@
package simplelru
import (
"container/list"
"errors"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})
// LRU implements a non-thread-safe fixed-size LRU cache
type LRU struct {
size int
evictList *list.List
items map[interface{}]*list.Element
onEvict EvictCallback
}
// entry is used to hold a value in the evictList
type entry struct {
key interface{}
value interface{}
}
// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
if size <= 0 {
return nil, errors.New("Must provide a positive size")
}
c := &LRU{
size: size,
evictList: list.New(),
items: make(map[interface{}]*list.Element),
onEvict: onEvict,
}
return c, nil
}
// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
for k, v := range c.items {
if c.onEvict != nil {
c.onEvict(k, v.Value.(*entry).value)
}
delete(c.items, k)
}
c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
// Check for existing item
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
ent.Value.(*entry).value = value
return false
}
// Add new item
ent := &entry{key, value}
entry := c.evictList.PushFront(ent)
c.items[key] = entry
evict := c.evictList.Len() > c.size
// Verify size not exceeded
if evict {
c.removeOldest()
}
return evict
}
// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
return ent.Value.(*entry).value, true
}
return
}
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
_, ok = c.items[key]
return ok
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
var ent *list.Element
if ent, ok = c.items[key]; ok {
return ent.Value.(*entry).value, true
}
return nil, ok
}
// Remove removes the provided key from the cache, returning whether the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
if ent, ok := c.items[key]; ok {
c.removeElement(ent)
return true
}
return false
}
// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
kv := ent.Value.(*entry)
return kv.key, kv.value, true
}
return nil, nil, false
}
// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
kv := ent.Value.(*entry)
return kv.key, kv.value, true
}
return nil, nil, false
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
keys := make([]interface{}, len(c.items))
i := 0
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
keys[i] = ent.Value.(*entry).key
i++
}
return keys
}
// Len returns the number of items in the cache.
func (c *LRU) Len() int {
return c.evictList.Len()
}
// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
}
}
// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
c.evictList.Remove(e)
kv := e.Value.(*entry)
delete(c.items, kv.key)
if c.onEvict != nil {
c.onEvict(kv.key, kv.value)
}
}
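Unlike the wrappers in the parent package, this LRU takes no locks; a short, single-goroutine usage sketch (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// simplelru.LRU is not synchronized; callers sharing it across
	// goroutines must add their own locking.
	l, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	})
	if err != nil {
		panic(err)
	}
	l.Add("x", 1)
	l.Add("y", 2)
	l.Get("x")            // marks "x" as most recently used
	l.Add("z", 3)         // evicts "y", the least recently used entry
	fmt.Println(l.Keys()) // oldest to newest: [x z]
}
```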

View File

@ -0,0 +1,37 @@
package simplelru
// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
// Adds a value to the cache, returns true if an eviction occurred and
// updates the "recently used"-ness of the key.
Add(key, value interface{}) bool
// Returns key's value from the cache and
// updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool)
// Checks if a key exists in the cache without updating the recent-ness.
Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key.
Peek(key interface{}) (value interface{}, ok bool)
// Removes a key from the cache.
Remove(key interface{}) bool
// Removes the oldest entry from cache.
RemoveOldest() (interface{}, interface{}, bool)
// Returns the oldest entry from the cache. #key, value, isFound
GetOldest() (interface{}, interface{}, bool)
// Returns a slice of the keys in the cache, from oldest to newest.
Keys() []interface{}
// Returns the number of items in the cache.
Len() int
// Clear all cache entries
Purge()
}
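Since *simplelru.LRU satisfies this interface, code can be written against LRUCache rather than a concrete cache type; a small hedged sketch (the warm helper is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

// warm pre-populates any implementation of simplelru.LRUCache.
func warm(c simplelru.LRUCache, entries map[interface{}]interface{}) {
	for k, v := range entries {
		c.Add(k, v)
	}
}

func main() {
	l, _ := simplelru.NewLRU(8, nil) // *LRU implements LRUCache
	warm(l, map[interface{}]interface{}{"a": 1, "b": 2})
	fmt.Println(l.Len()) // 2
}
```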

View File

@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"register_test.go",
"roundtrip_test.go",
],
importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion",
library = ":go_default_library",
deps = [
"//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"conversion.go",
"doc.go",
"register.go",
"types.go",
"zz_generated.conversion.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion",
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,77 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/validation/field"
)
func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *metav1.ListOptions, s conversion.Scope) error {
if err := metav1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
return err
}
if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
return err
}
out.IncludeUninitialized = in.IncludeUninitialized
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = in.TimeoutSeconds
out.Watch = in.Watch
out.Limit = in.Limit
out.Continue = in.Continue
return nil
}
func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOptions, out *ListOptions, s conversion.Scope) error {
if err := metav1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
return err
}
if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
return err
}
out.IncludeUninitialized = in.IncludeUninitialized
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = in.TimeoutSeconds
out.Watch = in.Watch
out.Limit = in.Limit
out.Continue = in.Continue
return nil
}
func Convert_map_to_v1_LabelSelector(in *map[string]string, out *metav1.LabelSelector, s conversion.Scope) error {
if in == nil {
return nil
}
out = new(metav1.LabelSelector)
for labelKey, labelValue := range *in {
metav1.AddLabelToSelector(out, labelKey, labelValue)
}
return nil
}
func Convert_v1_LabelSelector_to_map(in *metav1.LabelSelector, out *map[string]string, s conversion.Scope) error {
var err error
*out, err = metav1.LabelSelectorAsMap(in)
if err != nil {
err = field.Invalid(field.NewPath("labelSelector"), *in, fmt.Sprintf("cannot convert to old selector: %v", err))
}
return err
}

View File

@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package internalversion

View File

@ -0,0 +1,105 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"
// Scheme is the registry for any type that adheres to the meta API spec.
var scheme = runtime.NewScheme()
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
// Codecs provides access to encoding and decoding for the scheme.
var Codecs = serializer.NewCodecFactory(scheme)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// ParameterCodec handles versioning of objects that are converted to query parameters.
var ParameterCodec = runtime.NewParameterCodec(scheme)
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// addToGroupVersion registers common meta types into schemas.
func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddConversionFuncs(
metav1.Convert_string_To_labels_Selector,
metav1.Convert_labels_Selector_To_string,
metav1.Convert_string_To_fields_Selector,
metav1.Convert_fields_Selector_To_string,
Convert_map_to_v1_LabelSelector,
Convert_v1_LabelSelector_to_map,
Convert_internalversion_ListOptions_To_v1_ListOptions,
Convert_v1_ListOptions_To_internalversion_ListOptions,
)
// ListOptions is the only options struct which needs conversion (it exposes labels and fields
// as selectors for convenience). The other types have only a single representation today.
scheme.AddKnownTypes(SchemeGroupVersion,
&ListOptions{},
&metav1.GetOptions{},
&metav1.ExportOptions{},
&metav1.DeleteOptions{},
)
scheme.AddKnownTypes(SchemeGroupVersion,
&metav1alpha1.Table{},
&metav1alpha1.TableOptions{},
&metav1alpha1.PartialObjectMetadata{},
&metav1alpha1.PartialObjectMetadataList{},
)
scheme.AddKnownTypes(metav1alpha1.SchemeGroupVersion,
&metav1alpha1.Table{},
&metav1alpha1.TableOptions{},
&metav1alpha1.PartialObjectMetadata{},
&metav1alpha1.PartialObjectMetadataList{},
)
// Allow delete options to be decoded across all versions in this scheme (we may want to be more clever than this)
scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{})
metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
return nil
}
// Unlike other API groups, meta internal knows about all meta external versions, but keeps
// the logic for conversion private.
func init() {
if err := addToGroupVersion(scheme, SchemeGroupVersion); err != nil {
panic(err)
}
}

View File

@ -0,0 +1,70 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ListOptions is the query options to a standard REST list call.
type ListOptions struct {
metav1.TypeMeta
// A selector based on labels
LabelSelector labels.Selector
// A selector based on fields
FieldSelector fields.Selector
// If true, partially initialized resources are included in the response.
// +optional
IncludeUninitialized bool
// If true, watch for changes to this list
Watch bool
// When specified with a watch call, shows changes that occur after that particular version of a resource.
// Defaults to changes from the beginning of history.
// When specified for list:
// - if unset, then the result is returned from remote storage based on quorum-read flag;
// - if it's 0, then we simply return what we currently have in cache, no guarantee;
// - if set to non zero, then the result is at least as fresh as given rv.
ResourceVersion string
// Timeout for the list/watch call.
TimeoutSeconds *int64
// Limit specifies the maximum number of results to return from the server. The server may
// not support this field on all resource types, but if it does and more results remain it
// will set the continue field on the returned list object.
Limit int64
// Continue is a token returned by the server that lets a client retrieve chunks of results
// from the server by specifying limit. The server may reject requests for continuation tokens
// it does not recognize and will return a 410 error if the token can no longer be used because
// it has expired.
Continue string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
type List struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []runtime.Object
}
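A hedged sketch of constructing these internal ListOptions with label and field selectors (the selector values below are illustrative; SelectorFromSet and OneTermEqualSelector come from the apimachinery labels and fields packages):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	timeout := int64(30)
	opts := internalversion.ListOptions{
		// Select objects labelled app=keel in the Running phase (example values).
		LabelSelector:  labels.SelectorFromSet(labels.Set{"app": "keel"}),
		FieldSelector:  fields.OneTermEqualSelector("status.phase", "Running"),
		TimeoutSeconds: &timeout,
		Limit:          500,
	}
	fmt.Println(opts.LabelSelector.String(), opts.FieldSelector.String())
}
```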

View File

@ -0,0 +1,113 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by conversion-gen. Do not edit it manually!
package internalversion
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
unsafe "unsafe"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_internalversion_List_To_v1_List,
Convert_v1_List_To_internalversion_List,
Convert_internalversion_ListOptions_To_v1_ListOptions,
Convert_v1_ListOptions_To_internalversion_ListOptions,
)
}
func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.RawExtension, len(*in))
for i := range *in {
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = make([]runtime.RawExtension, 0)
}
return nil
}
// Convert_internalversion_List_To_v1_List is an autogenerated conversion function.
func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
return autoConvert_internalversion_List_To_v1_List(in, out, s)
}
func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.Object, len(*in))
for i := range *in {
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_List_To_internalversion_List is an autogenerated conversion function.
func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
return autoConvert_v1_List_To_internalversion_List(in, out, s)
}
func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
return err
}
if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
return err
}
out.IncludeUninitialized = in.IncludeUninitialized
out.Watch = in.Watch
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
return nil
}
func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
return err
}
if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
return err
}
out.IncludeUninitialized = in.IncludeUninitialized
out.Watch = in.Watch
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
return nil
}

View File

@ -0,0 +1,108 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package internalversion
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *List) DeepCopyInto(out *List) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.Object, len(*in))
for i := range *in {
if (*in)[i] == nil {
(*out)[i] = nil
} else {
(*out)[i] = (*in)[i].DeepCopyObject()
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
func (in *List) DeepCopy() *List {
if in == nil {
return nil
}
out := new(List)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *List) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ListOptions) DeepCopyInto(out *ListOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.LabelSelector == nil {
out.LabelSelector = nil
} else {
out.LabelSelector = in.LabelSelector.DeepCopySelector()
}
if in.FieldSelector == nil {
out.FieldSelector = nil
} else {
out.FieldSelector = in.FieldSelector.DeepCopySelector()
}
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
if *in == nil {
*out = nil
} else {
*out = new(int64)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
func (in *ListOptions) DeepCopy() *ListOptions {
if in == nil {
return nil
}
out := new(ListOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ListOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}

View File

@ -9,26 +9,25 @@ load(
 go_test(
     name = "go_default_test",
     srcs = [
-        "backoff_test.go",
-        "throttle_test.go",
+        "cache_test.go",
+        "lruexpirecache_test.go",
     ],
-    importpath = "k8s.io/client-go/util/flowcontrol",
+    importpath = "k8s.io/apimachinery/pkg/util/cache",
     library = ":go_default_library",
-    deps = ["//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library"],
+    deps = [
+        "//vendor/github.com/golang/groupcache/lru:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
+    ],
 )
 go_library(
     name = "go_default_library",
     srcs = [
-        "backoff.go",
-        "throttle.go",
+        "cache.go",
+        "lruexpirecache.go",
     ],
-    importpath = "k8s.io/client-go/util/flowcontrol",
-    deps = [
-        "//vendor/github.com/juju/ratelimit:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
-        "//vendor/k8s.io/client-go/util/integer:go_default_library",
-    ],
+    importpath = "k8s.io/apimachinery/pkg/util/cache",
+    deps = ["//vendor/github.com/hashicorp/golang-lru:go_default_library"],
 )
 filegroup(

83
vendor/k8s.io/apimachinery/pkg/util/cache/cache.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
)
const (
shardsCount int = 32
)
type Cache []*cacheShard
func NewCache(maxSize int) Cache {
if maxSize < shardsCount {
maxSize = shardsCount
}
cache := make(Cache, shardsCount)
for i := 0; i < shardsCount; i++ {
cache[i] = &cacheShard{
items: make(map[uint64]interface{}),
maxSize: maxSize / shardsCount,
}
}
return cache
}
func (c Cache) getShard(index uint64) *cacheShard {
return c[index%uint64(shardsCount)]
}
// Returns true if object already existed, false otherwise.
func (c *Cache) Add(index uint64, obj interface{}) bool {
return c.getShard(index).add(index, obj)
}
func (c *Cache) Get(index uint64) (obj interface{}, found bool) {
return c.getShard(index).get(index)
}
type cacheShard struct {
items map[uint64]interface{}
sync.RWMutex
maxSize int
}
// Returns true if object already existed, false otherwise.
func (s *cacheShard) add(index uint64, obj interface{}) bool {
s.Lock()
defer s.Unlock()
_, isOverwrite := s.items[index]
if !isOverwrite && len(s.items) >= s.maxSize {
var randomKey uint64
for randomKey = range s.items {
break
}
delete(s.items, randomKey)
}
s.items[index] = obj
return isOverwrite
}
func (s *cacheShard) get(index uint64) (obj interface{}, found bool) {
s.RLock()
defer s.RUnlock()
obj, found = s.items[index]
return
}
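The sharded cache above is keyed by uint64, so callers typically hash their real key first; a minimal sketch (the keyFor helper is hypothetical):

```go
package main

import (
	"fmt"
	"hash/fnv"

	"k8s.io/apimachinery/pkg/util/cache"
)

// keyFor hashes a string into the uint64 index the cache expects.
func keyFor(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	c := cache.NewCache(1024) // 32 shards of ~32 entries each
	overwrote := c.Add(keyFor("pod/default/web-1"), "cached value")
	fmt.Println("overwrote existing entry:", overwrote)

	if v, ok := c.Get(keyFor("pod/default/web-1")); ok {
		fmt.Println("got:", v)
	}
}
```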

View File

@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"github.com/hashicorp/golang-lru"
)
// Clock defines an interface for obtaining the current time
type Clock interface {
Now() time.Time
}
// realClock implements the Clock interface by calling time.Now()
type realClock struct{}
func (realClock) Now() time.Time { return time.Now() }
// LRUExpireCache is a cache that keeps the most recently accessed keys, with
// a TTL beyond which keys are forcibly expired.
type LRUExpireCache struct {
// clock is used to obtain the current time
clock Clock
cache *lru.Cache
lock sync.Mutex
}
// NewLRUExpireCache creates an expiring cache with the given size
func NewLRUExpireCache(maxSize int) *LRUExpireCache {
return NewLRUExpireCacheWithClock(maxSize, realClock{})
}
// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
cache, err := lru.New(maxSize)
if err != nil {
// if called with an invalid size
panic(err)
}
return &LRUExpireCache{clock: clock, cache: cache}
}
type cacheEntry struct {
value interface{}
expireTime time.Time
}
// Add adds the value to the cache at key with the specified maximum duration.
func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
}
// Get returns the value at the specified key from the cache if it exists and is not
// expired; otherwise it returns false.
func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
c.lock.Lock()
defer c.lock.Unlock()
e, ok := c.cache.Get(key)
if !ok {
return nil, false
}
if c.clock.Now().After(e.(*cacheEntry).expireTime) {
c.cache.Remove(key)
return nil, false
}
return e.(*cacheEntry).value, true
}
// Remove removes the specified key from the cache if it exists
func (c *LRUExpireCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Remove(key)
}
// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to
// Get may report them as not found. It returns all keys from oldest to newest.
func (c *LRUExpireCache) Keys() []interface{} {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache.Keys()
}
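A short usage sketch of LRUExpireCache: entries disappear either when LRU capacity is exceeded or, lazily on Get, once their TTL has passed (the key and TTL below are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := cache.NewLRUExpireCache(128)

	// Cache a value for one second.
	c.Add("token", "abc123", 1*time.Second)

	if v, ok := c.Get("token"); ok {
		fmt.Println("fresh:", v)
	}

	time.Sleep(1100 * time.Millisecond)

	// The entry may still be stored, but Get notices the expiry and removes it.
	if _, ok := c.Get("token"); !ok {
		fmt.Println("expired")
	}
}
```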

View File

@ -8,15 +8,19 @@ load(
 go_test(
     name = "go_default_test",
-    srcs = ["integer_test.go"],
-    importpath = "k8s.io/client-go/util/integer",
+    srcs = ["diff_test.go"],
+    importpath = "k8s.io/apimachinery/pkg/util/diff",
     library = ":go_default_library",
 )
 go_library(
     name = "go_default_library",
-    srcs = ["integer.go"],
-    importpath = "k8s.io/client-go/util/integer",
+    srcs = ["diff.go"],
+    importpath = "k8s.io/apimachinery/pkg/util/diff",
+    deps = [
+        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
+    ],
 )
 filegroup(

273
vendor/k8s.io/apimachinery/pkg/util/diff/diff.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"text/tabwriter"
"github.com/davecgh/go-spew/spew"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// StringDiff diffs a and b and returns a human readable diff.
func StringDiff(a, b string) string {
ba := []byte(a)
bb := []byte(b)
out := []byte{}
i := 0
for ; i < len(ba) && i < len(bb); i++ {
if ba[i] != bb[i] {
break
}
out = append(out, ba[i])
}
out = append(out, []byte("\n\nA: ")...)
out = append(out, ba[i:]...)
out = append(out, []byte("\n\nB: ")...)
out = append(out, bb[i:]...)
out = append(out, []byte("\n\n")...)
return string(out)
}
// ObjectDiff writes the two objects out as JSON and prints out the identical part of
// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'.
// For debugging tests.
func ObjectDiff(a, b interface{}) string {
ab, err := json.Marshal(a)
if err != nil {
panic(fmt.Sprintf("a: %v", err))
}
bb, err := json.Marshal(b)
if err != nil {
panic(fmt.Sprintf("b: %v", err))
}
return StringDiff(string(ab), string(bb))
}
// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects,
// which shows absolutely everything by recursing into every single pointer
// (go's %#v formatters OTOH stop at a certain point). This is needed when you
// can't figure out why reflect.DeepEqual is returning false and nothing is
// showing you differences. This will.
func ObjectGoPrintDiff(a, b interface{}) string {
s := spew.ConfigState{DisableMethods: true}
return StringDiff(
s.Sprintf("%#v", a),
s.Sprintf("%#v", b),
)
}
func ObjectReflectDiff(a, b interface{}) string {
vA, vB := reflect.ValueOf(a), reflect.ValueOf(b)
if vA.Type() != vB.Type() {
return fmt.Sprintf("type A %T and type B %T do not match", a, b)
}
diffs := objectReflectDiff(field.NewPath("object"), vA, vB)
if len(diffs) == 0 {
return "<no diffs>"
}
out := []string{""}
for _, d := range diffs {
out = append(out,
fmt.Sprintf("%s:", d.path),
limit(fmt.Sprintf(" a: %#v", d.a), 80),
limit(fmt.Sprintf(" b: %#v", d.b), 80),
)
}
return strings.Join(out, "\n")
}
func limit(s string, max int) string {
if len(s) > max {
return s[:max]
}
return s
}
func public(s string) bool {
if len(s) == 0 {
return false
}
return s[:1] == strings.ToUpper(s[:1])
}
type diff struct {
path *field.Path
a, b interface{}
}
type orderedDiffs []diff
func (d orderedDiffs) Len() int { return len(d) }
func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d orderedDiffs) Less(i, j int) bool {
a, b := d[i].path.String(), d[j].path.String()
if a < b {
return true
}
return false
}
func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff {
switch a.Type().Kind() {
case reflect.Struct:
var changes []diff
for i := 0; i < a.Type().NumField(); i++ {
if !public(a.Type().Field(i).Name) {
if reflect.DeepEqual(a.Interface(), b.Interface()) {
continue
}
return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
}
if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 {
changes = append(changes, sub...)
}
}
return changes
case reflect.Ptr, reflect.Interface:
if a.IsNil() || b.IsNil() {
switch {
case a.IsNil() && b.IsNil():
return nil
case a.IsNil():
return []diff{{path: path, a: nil, b: b.Interface()}}
default:
return []diff{{path: path, a: a.Interface(), b: nil}}
}
}
return objectReflectDiff(path, a.Elem(), b.Elem())
case reflect.Chan:
if !reflect.DeepEqual(a.Interface(), b.Interface()) {
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
return nil
case reflect.Slice:
lA, lB := a.Len(), b.Len()
l := lA
if lB < lA {
l = lB
}
if lA == lB && lA == 0 {
if a.IsNil() != b.IsNil() {
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
return nil
}
var diffs []diff
for i := 0; i < l; i++ {
if !reflect.DeepEqual(a.Index(i), b.Index(i)) {
diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...)
}
}
for i := l; i < lA; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil})
}
for i := l; i < lB; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)})
}
return diffs
case reflect.Map:
if reflect.DeepEqual(a.Interface(), b.Interface()) {
return nil
}
aKeys := make(map[interface{}]interface{})
for _, key := range a.MapKeys() {
aKeys[key.Interface()] = a.MapIndex(key).Interface()
}
var missing []diff
for _, key := range b.MapKeys() {
if _, ok := aKeys[key.Interface()]; ok {
delete(aKeys, key.Interface())
if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) {
continue
}
missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...)
continue
}
missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()})
}
for key, value := range aKeys {
missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil})
}
if len(missing) == 0 {
missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()})
}
sort.Sort(orderedDiffs(missing))
return missing
default:
if reflect.DeepEqual(a.Interface(), b.Interface()) {
return nil
}
if !a.CanInterface() {
return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
}
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
}
// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
// enabling easy visual scanning for mismatches.
func ObjectGoPrintSideBySide(a, b interface{}) string {
s := spew.ConfigState{
Indent: " ",
// Extra deep spew.
DisableMethods: true,
}
sA := s.Sdump(a)
sB := s.Sdump(b)
linesA := strings.Split(sA, "\n")
linesB := strings.Split(sB, "\n")
width := 0
for _, s := range linesA {
l := len(s)
if l > width {
width = l
}
}
for _, s := range linesB {
l := len(s)
if l > width {
width = l
}
}
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
max := len(linesA)
if len(linesB) > max {
max = len(linesB)
}
for i := 0; i < max; i++ {
var a, b string
if i < len(linesA) {
a = linesA[i]
}
if i < len(linesB) {
b = linesB[i]
}
fmt.Fprintf(w, "%s\t%s\n", a, b)
}
w.Flush()
return buf.String()
}
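A hedged sketch of the diff helpers above, the kind of thing tests reach for when reflect.DeepEqual fails and the difference is not obvious (the spec struct is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/diff"
)

type spec struct {
	Replicas int
	Image    string
}

func main() {
	a := spec{Replicas: 2, Image: "keel:0.8"}
	b := spec{Replicas: 3, Image: "keel:0.9"}

	// Field-by-field diff with paths such as object.Replicas.
	fmt.Println(diff.ObjectReflectDiff(a, b))

	// JSON-based diff: common prefix followed by the two diverging tails.
	fmt.Println(diff.ObjectDiff(a, b))
}
```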

View File

@ -1,76 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"discovery_client.go",
"helper.go",
"restmapper.go",
"unstructured.go",
],
importpath = "k8s.io/client-go/discovery",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = [
"discovery_client_test.go",
"helper_blackbox_test.go",
"restmapper_test.go",
],
importpath = "k8s.io/client-go/discovery_test",
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/rest/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/discovery/cached:all-srcs",
"//staging/src/k8s.io/client-go/discovery/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,95 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"clientset.go",
"doc.go",
"import.go",
],
importpath = "k8s.io/client-go/kubernetes",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/fake:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/scheme:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1:all-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,62 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
],
importpath = "k8s.io/client-go/kubernetes/scheme",
deps = [
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/events/v1beta1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"admissionregistration_client.go",
"doc.go",
"generated_expansion.go",
"initializerconfiguration.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,40 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"admissionregistration_client.go",
"doc.go",
"generated_expansion.go",
"mutatingwebhookconfiguration.go",
"validatingwebhookconfiguration.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,43 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"apps_client.go",
"controllerrevision.go",
"daemonset.go",
"deployment.go",
"doc.go",
"generated_expansion.go",
"replicaset.go",
"statefulset.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/apps/v1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"apps_client.go",
"controllerrevision.go",
"deployment.go",
"doc.go",
"generated_expansion.go",
"scale.go",
"statefulset.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta1",
deps = [
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,47 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"apps_client.go",
"controllerrevision.go",
"daemonset.go",
"deployment.go",
"doc.go",
"generated_expansion.go",
"replicaset.go",
"scale.go",
"statefulset.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta2",
deps = [
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,40 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"authentication_client.go",
"doc.go",
"generated_expansion.go",
"tokenreview.go",
"tokenreview_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1",
deps = [
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,40 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"authentication_client.go",
"doc.go",
"generated_expansion.go",
"tokenreview.go",
"tokenreview_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1",
deps = [
"//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,46 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"authorization_client.go",
"doc.go",
"generated_expansion.go",
"localsubjectaccessreview.go",
"localsubjectaccessreview_expansion.go",
"selfsubjectaccessreview.go",
"selfsubjectaccessreview_expansion.go",
"selfsubjectrulesreview.go",
"selfsubjectrulesreview_expansion.go",
"subjectaccessreview.go",
"subjectaccessreview_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1",
deps = [
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,46 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"authorization_client.go",
"doc.go",
"generated_expansion.go",
"localsubjectaccessreview.go",
"localsubjectaccessreview_expansion.go",
"selfsubjectaccessreview.go",
"selfsubjectaccessreview_expansion.go",
"selfsubjectrulesreview.go",
"selfsubjectrulesreview_expansion.go",
"subjectaccessreview.go",
"subjectaccessreview_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1",
deps = [
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"autoscaling_client.go",
"doc.go",
"generated_expansion.go",
"horizontalpodautoscaler.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v1",
deps = [
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"autoscaling_client.go",
"doc.go",
"generated_expansion.go",
"horizontalpodautoscaler.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"batch_client.go",
"doc.go",
"generated_expansion.go",
"job.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/batch/v1",
deps = [
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"batch_client.go",
"cronjob.go",
"doc.go",
"generated_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/batch/v1beta1",
deps = [
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"batch_client.go",
"cronjob.go",
"doc.go",
"generated_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/batch/v2alpha1",
deps = [
"//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,43 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"certificates_client.go",
"certificatesigningrequest.go",
"certificatesigningrequest_expansion.go",
"doc.go",
"generated_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/certificates/v1beta1",
deps = [
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,68 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"componentstatus.go",
"configmap.go",
"core_client.go",
"doc.go",
"endpoints.go",
"event.go",
"event_expansion.go",
"generated_expansion.go",
"limitrange.go",
"namespace.go",
"namespace_expansion.go",
"node.go",
"node_expansion.go",
"persistentvolume.go",
"persistentvolumeclaim.go",
"pod.go",
"pod_expansion.go",
"podtemplate.go",
"replicationcontroller.go",
"resourcequota.go",
"secret.go",
"service.go",
"service_expansion.go",
"serviceaccount.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/core/v1",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"event.go",
"events_client.go",
"generated_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/events/v1beta1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/events/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,51 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"daemonset.go",
"deployment.go",
"deployment_expansion.go",
"doc.go",
"extensions_client.go",
"generated_expansion.go",
"ingress.go",
"podsecuritypolicy.go",
"replicaset.go",
"scale.go",
"scale_expansion.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1",
deps = [
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"networking_client.go",
"networkpolicy.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/networking/v1",
deps = [
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,44 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"eviction.go",
"eviction_expansion.go",
"generated_expansion.go",
"poddisruptionbudget.go",
"policy_client.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/policy/v1beta1",
deps = [
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"clusterrole.go",
"clusterrolebinding.go",
"doc.go",
"generated_expansion.go",
"rbac_client.go",
"role.go",
"rolebinding.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1",
deps = [
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"clusterrole.go",
"clusterrolebinding.go",
"doc.go",
"generated_expansion.go",
"rbac_client.go",
"role.go",
"rolebinding.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1",
deps = [
"//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"clusterrole.go",
"clusterrolebinding.go",
"doc.go",
"generated_expansion.go",
"rbac_client.go",
"role.go",
"rolebinding.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1beta1",
deps = [
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"priorityclass.go",
"scheduling_client.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1",
deps = [
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"podpreset.go",
"settings_client.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/settings/v1alpha1",
deps = [
"//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"storage_client.go",
"storageclass.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/storage/v1",
deps = [
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"storage_client.go",
"volumeattachment.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/storage/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,42 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"storage_client.go",
"storageclass.go",
],
importpath = "k8s.io/client-go/kubernetes/typed/storage/v1beta1",
deps = [
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,30 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"base.go",
"doc.go",
"version.go",
],
importpath = "k8s.io/client-go/pkg/version",
deps = ["//vendor/k8s.io/apimachinery/pkg/version:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

100
vendor/k8s.io/client-go/rest/BUILD generated vendored
View File

@ -1,100 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"client_test.go",
"config_test.go",
"plugin_test.go",
"request_test.go",
"url_utils_test.go",
"urlbackoff_test.go",
],
importpath = "k8s.io/client-go/rest",
library = ":go_default_library",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/gofuzz:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest/watch:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"client.go",
"config.go",
"plugin.go",
"request.go",
"transport.go",
"url_utils.go",
"urlbackoff.go",
"versions.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/client-go/rest",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/http2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/pkg/version:go_default_library",
"//vendor/k8s.io/client-go/rest/watch:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/tools/metrics:go_default_library",
"//vendor/k8s.io/client-go/transport:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/rest/fake:all-srcs",
"//staging/src/k8s.io/client-go/rest/watch:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,57 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"decoder.go",
"encoder.go",
],
importpath = "k8s.io/client-go/rest/watch",
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = [
"decoder_test.go",
"encoder_test.go",
],
importpath = "k8s.io/client-go/rest/watch_test",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest/watch:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,34 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["clientauth.go"],
importpath = "k8s.io/client-go/tools/auth",
deps = ["//vendor/k8s.io/client-go/rest:go_default_library"],
)
go_test(
name = "go_default_xtest",
srcs = ["clientauth_test.go"],
importpath = "k8s.io/client-go/tools/auth_test",
deps = ["//vendor/k8s.io/client-go/tools/auth:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

50
vendor/k8s.io/client-go/tools/cache/OWNERS generated vendored Executable file
View File

@ -0,0 +1,50 @@
approvers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- caesarxuchao
- liggitt
- ncdc
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- brendandburns
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- erictune
- davidopp
- pmorie
- kargakis
- janetkuo
- justinsb
- eparis
- soltysh
- jsafrane
- dims
- madhusudancs
- hongchaodeng
- krousey
- markturansky
- fgrzadkowski
- xiang90
- mml
- ingvagabund
- resouer
- jessfraz
- david-mcmahon
- mfojtik
- '249043822'
- lixiaobing10051267
- ddysher
- mqliang
- feihujiang
- sdminonne
- ncdc

394
vendor/k8s.io/client-go/tools/cache/controller.go generated vendored Normal file
View File

@ -0,0 +1,394 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
)
// Config contains all the settings for a Controller.
type Config struct {
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Queue's Pop() method.
Queue
// Something that can list and watch your objects.
ListerWatcher
// Something that can process your objects.
Process ProcessFunc
// The type of your objects.
ObjectType runtime.Object
// Reprocess everything at least this often.
// Note that if it takes longer for you to clear the queue than this
// period, you will end up processing items in the order determined
// by FIFO.Replace(). Currently, this is random. If this is a
// problem, we can change that replacement policy to append new
// things to the end of the queue instead of replacing the entire
// queue.
FullResyncPeriod time.Duration
// ShouldResync, if specified, is invoked when the controller's reflector determines the next
// periodic sync should occur. If this returns true, it means the reflector should proceed with
// the resync.
ShouldResync ShouldResyncFunc
// If true, when Process() returns an error, re-enqueue the object.
// TODO: add interface to let you inject a delay/backoff or drop
// the object completely if desired. Pass the object in
// question to this interface as a parameter.
RetryOnError bool
}
// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
// resync or not. It can be used by a shared informer to support multiple event handlers with custom
// resync periods.
type ShouldResyncFunc func() bool
// ProcessFunc processes a single object.
type ProcessFunc func(obj interface{}) error
// Controller is a generic controller framework.
type controller struct {
config Config
reflector *Reflector
reflectorMutex sync.RWMutex
clock clock.Clock
}
type Controller interface {
Run(stopCh <-chan struct{})
HasSynced() bool
LastSyncResourceVersion() string
}
// New makes a new Controller from the given Config.
func New(c *Config) Controller {
ctlr := &controller{
config: *c,
clock: &clock.RealClock{},
}
return ctlr
}
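// Example (illustrative sketch, not part of the vendored file): the low-level
// path is to assemble a Config by hand and pass it to New. The names lw,
// process, the pod type and stopCh below are assumptions for the sketch; most
// callers use the NewInformer/NewIndexerInformer helpers further down instead.
//
//	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, nil)
//	cfg := &Config{
//		Queue:            fifo,
//		ListerWatcher:    lw, // e.g. a ListWatch over pods
//		ObjectType:       &v1.Pod{},
//		FullResyncPeriod: 30 * time.Second,
//		RetryOnError:     false,
//		Process: func(obj interface{}) error {
//			return process(obj.(Deltas)) // obj is a Deltas value popped from the DeltaFIFO
//		},
//	}
//	go New(cfg).Run(stopCh)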
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
go func() {
<-stopCh
c.config.Queue.Close()
}()
r := NewReflector(
c.config.ListerWatcher,
c.config.ObjectType,
c.config.Queue,
c.config.FullResyncPeriod,
)
r.ShouldResync = c.config.ShouldResync
r.clock = c.clock
c.reflectorMutex.Lock()
c.reflector = r
c.reflectorMutex.Unlock()
var wg wait.Group
defer wg.Wait()
wg.StartWithChannel(stopCh, r.Run)
wait.Until(c.processLoop, time.Second, stopCh)
}
// Returns true once this controller has completed an initial resource listing
func (c *controller) HasSynced() bool {
return c.config.Queue.HasSynced()
}
func (c *controller) LastSyncResourceVersion() string {
if c.reflector == nil {
return ""
}
return c.reflector.LastSyncResourceVersion()
}
// processLoop drains the work queue.
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
//
// TODO: Plumb through the stopCh here (and down to the queue) so that this can
// actually exit when the controller is stopped. Or just give up on this stuff
// ever being stoppable. Converting this whole package to use Context would
// also be helpful.
func (c *controller) processLoop() {
for {
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
if err != nil {
if err == FIFOClosedError {
return
}
if c.config.RetryOnError {
// This is the safe way to re-enqueue.
c.config.Queue.AddIfNotPresent(obj)
}
}
}
}
// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// error.
// * OnAdd is called when an object is added.
// * OnUpdate is called when an object is modified. Note that oldObj is the
// last known state of the object-- it is possible that several changes
// were combined together, so you can't use this to see every single
// change. OnUpdate is also called when a re-list happens, and it will
// get called even if nothing changed. This is useful for periodically
// evaluating or syncing something.
// * OnDelete will get the final state of the item if it is known, otherwise
// it will get an object of type DeletedFinalStateUnknown. This can
// happen if the watch is closed and misses the delete event and we don't
// notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
OnAdd(obj interface{})
OnUpdate(oldObj, newObj interface{})
OnDelete(obj interface{})
}
// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
// as few of the notification functions as you want while still implementing
// ResourceEventHandler.
type ResourceEventHandlerFuncs struct {
AddFunc func(obj interface{})
UpdateFunc func(oldObj, newObj interface{})
DeleteFunc func(obj interface{})
}
// OnAdd calls AddFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
if r.AddFunc != nil {
r.AddFunc(obj)
}
}
// OnUpdate calls UpdateFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
if r.UpdateFunc != nil {
r.UpdateFunc(oldObj, newObj)
}
}
// OnDelete calls DeleteFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
if r.DeleteFunc != nil {
r.DeleteFunc(obj)
}
}
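// Example (illustrative sketch): ResourceEventHandlerFuncs lets a caller supply
// only the callbacks it needs; any nil field makes the corresponding On* method
// a no-op.
//
//	handler := ResourceEventHandlerFuncs{
//		AddFunc: func(obj interface{}) {
//			// a new object was observed
//		},
//		UpdateFunc: func(oldObj, newObj interface{}) {
//			// the object changed, or a resync re-delivered it
//		},
//		// DeleteFunc left nil: OnDelete does nothing.
//	}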
// FilteringResourceEventHandler applies the provided filter to all events coming
// in, ensuring the appropriate nested handler method is invoked. An object
// that starts passing the filter after an update is considered an add, and an
// object that stops passing the filter after an update is considered a delete.
type FilteringResourceEventHandler struct {
FilterFunc func(obj interface{}) bool
Handler ResourceEventHandler
}
// OnAdd calls the nested handler only if the filter succeeds
func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
if !r.FilterFunc(obj) {
return
}
r.Handler.OnAdd(obj)
}
// OnUpdate ensures the proper handler is called depending on whether the filter matches
func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
newer := r.FilterFunc(newObj)
older := r.FilterFunc(oldObj)
switch {
case newer && older:
r.Handler.OnUpdate(oldObj, newObj)
case newer && !older:
r.Handler.OnAdd(newObj)
case !newer && older:
r.Handler.OnDelete(oldObj)
default:
// do nothing
}
}
// OnDelete calls the nested handler only if the filter succeeds
func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
if !r.FilterFunc(obj) {
return
}
r.Handler.OnDelete(obj)
}
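// Example (illustrative sketch): wrapping a handler so that only objects from a
// given namespace are delivered. MetaNamespaceKeyFunc yields "namespace/name"
// keys, so the prefix check below acts as a simple namespace filter; the
// handler value is assumed to exist (e.g. the funcs sketch above).
//
//	filtered := FilteringResourceEventHandler{
//		FilterFunc: func(obj interface{}) bool {
//			key, err := MetaNamespaceKeyFunc(obj)
//			return err == nil && strings.HasPrefix(key, "kube-system/")
//		},
//		Handler: handler,
//	}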
// DeletionHandlingMetaNamespaceKeyFunc checks for
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
if d, ok := obj.(DeletedFinalStateUnknown); ok {
return d.Key, nil
}
return MetaNamespaceKeyFunc(obj)
}
// NewInformer returns a Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
//
func NewInformer(
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
) (Store, Controller) {
// This will hold the client state, as we know it.
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
cfg := &Config{
Queue: fifo,
ListerWatcher: lw,
ObjectType: objType,
FullResyncPeriod: resyncPeriod,
RetryOnError: false,
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(Deltas) {
switch d.Type {
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
}
h.OnUpdate(old, d.Object)
} else {
if err := clientState.Add(d.Object); err != nil {
return err
}
h.OnAdd(d.Object)
}
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
h.OnDelete(d.Object)
}
}
return nil
},
}
return clientState, New(cfg)
}
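// Example (illustrative caller-side sketch): building a low-level informer over
// pods. The clientset, handler and stopCh values are assumptions here, not part
// of this file.
//
//	lw := NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", meta_v1.NamespaceAll, fields.Everything())
//	store, ctrl := NewInformer(lw, &v1.Pod{}, 30*time.Second, handler)
//	go ctrl.Run(stopCh)
//	// once ctrl.HasSynced() returns true, store.List()/GetByKey() reflect the cluster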
// NewIndexerInformer returns an Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// Indexer for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
// * indexers is the indexer for the received object type.
//
func NewIndexerInformer(
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
indexers Indexers,
) (Indexer, Controller) {
// This will hold the client state, as we know it.
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
cfg := &Config{
Queue: fifo,
ListerWatcher: lw,
ObjectType: objType,
FullResyncPeriod: resyncPeriod,
RetryOnError: false,
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(Deltas) {
switch d.Type {
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
}
h.OnUpdate(old, d.Object)
} else {
if err := clientState.Add(d.Object); err != nil {
return err
}
h.OnAdd(d.Object)
}
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
h.OnDelete(d.Object)
}
}
return nil
},
}
return clientState, New(cfg)
}
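// Example (illustrative sketch): the same wiring as NewInformer, but with a
// namespace index so cached objects can be listed per namespace; lw, handler
// and stopCh are assumed as in the sketch above.
//
//	indexer, ctrl := NewIndexerInformer(lw, &v1.Pod{}, 0, handler,
//		Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
//	go ctrl.Run(stopCh)
//	podsInDefault, _ := indexer.ByIndex(NamespaceIndex, "default")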

685
vendor/k8s.io/client-go/tools/cache/delta_fifo.go generated vendored Normal file
View File

@ -0,0 +1,685 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/golang/glog"
)
// NewDeltaFIFO returns a Store which can be used to process changes to items.
//
// keyFunc is used to figure out what key an object should have. (It's
// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
//
// 'compressor' may compress as many or as few items as it wants
// (including returning an empty slice), but it should do what it
// does quickly since it is called while the queue is locked.
// 'compressor' may be nil if you don't want any delta compression.
//
// 'keyLister' is expected to return a list of keys that the consumer of
// this queue "knows about". It is used to decide which items are missing
// when Replace() is called; 'Deleted' deltas are produced for these items.
// It may be nil if you don't need to detect all deletions.
// TODO: consider merging keyLister with this object, tracking a list of
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
// TODO(lavalamp): I believe there is a possible race only when using an
// external known object source that the above TODO would
// fix.
//
// Also see the comment on DeltaFIFO.
func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyListerGetter) *DeltaFIFO {
f := &DeltaFIFO{
items: map[string]Deltas{},
queue: []string{},
keyFunc: keyFunc,
deltaCompressor: compressor,
knownObjects: knownObjects,
}
f.cond.L = &f.lock
return f
}
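// Example (illustrative sketch): a DeltaFIFO keyed by namespace/name, with no
// delta compression and no known-object lister, drained by a consumer. The pod
// value is an assumption for the sketch; the producer is normally a Reflector.
//
//	f := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, nil)
//	_ = f.Add(pod)
//	f.Pop(PopProcessFunc(func(obj interface{}) error {
//		for _, d := range obj.(Deltas) { // deltas are ordered oldest to newest
//			fmt.Println(d.Type, d.Object)
//		}
//		return nil
//	}))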
// DeltaFIFO is like FIFO, but allows you to process deletes.
//
// DeltaFIFO is a producer-consumer queue, where a Reflector is
// intended to be the producer, and the consumer is whatever calls
// the Pop() method.
//
// DeltaFIFO solves this use case:
// * You want to process every object change (delta) at most once.
// * When you process an object, you want to see everything
// that's happened to it since you last processed it.
// * You want to process the deletion of objects.
// * You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but it
// will always return an object of type Deltas.
//
// A note on threading: If you call Pop() in parallel from multiple
// threads, you could end up with multiple threads processing slightly
// different versions of the same object.
//
// A note on the KeyLister used by the DeltaFIFO: Its main purpose is
// to list keys that are "known", for the purpose of figuring out which
// items have been deleted when Replace() or Delete() are called. The deleted
// object will be included in the DeleteFinalStateUnknown markers. These objects
// could be stale.
//
// You may provide a function to compress deltas (e.g., represent a
// series of Updates as a single Update).
type DeltaFIFO struct {
// lock/cond protects access to 'items' and 'queue'.
lock sync.RWMutex
cond sync.Cond
// We depend on the property that items in the set are in
// the queue and vice versa, and that all Deltas in this
// map have at least one Delta.
items map[string]Deltas
queue []string
// populated is true if the first batch of items inserted by Replace() has been populated
// or Delete/Add/Update was called first.
populated bool
// initialPopulationCount is the number of items inserted by the first call of Replace()
initialPopulationCount int
// keyFunc is used to make the key used for queued item
// insertion and retrieval, and should be deterministic.
keyFunc KeyFunc
// deltaCompressor tells us how to combine two or more
// deltas. It may be nil.
deltaCompressor DeltaCompressor
// knownObjects lists keys that are "known", for the
// purpose of figuring out which items have been deleted
// when Replace() or Delete() is called.
knownObjects KeyListerGetter
// Indication the queue is closed.
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
// Currently, not used to gate any of CRUD operations.
closed bool
closedLock sync.Mutex
}
var (
_ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
)
var (
// ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
// object with zero length is encountered (should be impossible,
// even if such an object is accidentally produced by a DeltaCompressor--
// but included for completeness).
ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
)
// Close the queue.
func (f *DeltaFIFO) Close() {
f.closedLock.Lock()
defer f.closedLock.Unlock()
f.closed = true
f.cond.Broadcast()
}
// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
// DeletedFinalStateUnknown objects.
func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
if d, ok := obj.(Deltas); ok {
if len(d) == 0 {
return "", KeyError{obj, ErrZeroLengthDeltasObject}
}
obj = d.Newest().Object
}
if d, ok := obj.(DeletedFinalStateUnknown); ok {
return d.Key, nil
}
return f.keyFunc(obj)
}
// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
// or the first batch of items inserted by Replace() has been popped.
func (f *DeltaFIFO) HasSynced() bool {
f.lock.Lock()
defer f.lock.Unlock()
return f.populated && f.initialPopulationCount == 0
}
// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *DeltaFIFO) Add(obj interface{}) error {
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
return f.queueActionLocked(Added, obj)
}
// Update is just like Add, but makes an Updated Delta.
func (f *DeltaFIFO) Update(obj interface{}) error {
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
return f.queueActionLocked(Updated, obj)
}
// Delete is just like Add, but makes a Deleted Delta. If the item does not
// already exist, it will be ignored. (It may have already been deleted by a
// Replace (re-list), for example.)
func (f *DeltaFIFO) Delete(obj interface{}) error {
id, err := f.KeyOf(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
if f.knownObjects == nil {
if _, exists := f.items[id]; !exists {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
return nil
}
} else {
// We only want to skip the "deletion" action if the object doesn't
// exist in knownObjects and it doesn't have corresponding item in items.
// Note that even if there is a "deletion" action in items, we can ignore it,
// because it will be deduped automatically in "queueActionLocked"
_, exists, err := f.knownObjects.GetByKey(id)
_, itemsExist := f.items[id]
if err == nil && !exists && !itemsExist {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
// TODO(lavalamp): This may be racy-- we aren't properly locked
// with knownObjects.
return nil
}
}
return f.queueActionLocked(Deleted, obj)
}
// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
//
// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
// different from the Add/Update/Delete functions.
func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
deltas, ok := obj.(Deltas)
if !ok {
return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
}
id, err := f.KeyOf(deltas.Newest().Object)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.addIfNotPresent(id, deltas)
return nil
}
// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
// already holds the fifo lock.
func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
f.populated = true
if _, exists := f.items[id]; exists {
return
}
f.queue = append(f.queue, id)
f.items[id] = deltas
f.cond.Broadcast()
}
// re-listing and watching can deliver the same update multiple times in any
// order. This will combine the most recent two deltas if they are the same.
func dedupDeltas(deltas Deltas) Deltas {
n := len(deltas)
if n < 2 {
return deltas
}
a := &deltas[n-1]
b := &deltas[n-2]
if out := isDup(a, b); out != nil {
d := append(Deltas{}, deltas[:n-2]...)
return append(d, *out)
}
return deltas
}
// If a & b represent the same event, returns the delta that ought to be kept.
// Otherwise, returns nil.
// TODO: is there anything other than deletions that need deduping?
func isDup(a, b *Delta) *Delta {
if out := isDeletionDup(a, b); out != nil {
return out
}
// TODO: Detect other duplicate situations? Are there any?
return nil
}
// keep the one with the most information if both are deletions.
func isDeletionDup(a, b *Delta) *Delta {
if b.Type != Deleted || a.Type != Deleted {
return nil
}
// Do more sophisticated checks, or is this sufficient?
if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
return a
}
return b
}
// willObjectBeDeletedLocked returns true only if the last delta for the
// given object is Delete. Caller must lock first.
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
deltas := f.items[id]
return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}
// queueActionLocked appends to the delta list for the object, calling
// f.deltaCompressor if needed. Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
id, err := f.KeyOf(obj)
if err != nil {
return KeyError{obj, err}
}
// If object is supposed to be deleted (last event is Deleted),
// then we should ignore Sync events, because it would result in
// recreation of this object.
if actionType == Sync && f.willObjectBeDeletedLocked(id) {
return nil
}
newDeltas := append(f.items[id], Delta{actionType, obj})
newDeltas = dedupDeltas(newDeltas)
if f.deltaCompressor != nil {
newDeltas = f.deltaCompressor.Compress(newDeltas)
}
_, exists := f.items[id]
if len(newDeltas) > 0 {
if !exists {
f.queue = append(f.queue, id)
}
f.items[id] = newDeltas
f.cond.Broadcast()
} else if exists {
// The compression step removed all deltas, so
// we need to remove this from our map (extra items
// in the queue are ignored if they are not in the
// map).
delete(f.items, id)
}
return nil
}
// List returns a list of all the items; it returns the object
// from the most recent Delta.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) List() []interface{} {
f.lock.RLock()
defer f.lock.RUnlock()
return f.listLocked()
}
func (f *DeltaFIFO) listLocked() []interface{} {
list := make([]interface{}, 0, len(f.items))
for _, item := range f.items {
// Copy item's slice so operations on this slice (delta
// compression) won't interfere with the object we return.
item = copyDeltas(item)
list = append(list, item.Newest().Object)
}
return list
}
// ListKeys returns a list of all the keys of the objects currently
// in the FIFO.
func (f *DeltaFIFO) ListKeys() []string {
f.lock.RLock()
defer f.lock.RUnlock()
list := make([]string, 0, len(f.items))
for key := range f.items {
list = append(list, key)
}
return list
}
// Get returns the complete list of deltas for the requested item,
// or sets exists=false.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
key, err := f.KeyOf(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
return f.GetByKey(key)
}
// GetByKey returns the complete list of deltas for the requested item,
// setting exists=false if that list is empty.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
f.lock.RLock()
defer f.lock.RUnlock()
d, exists := f.items[key]
if exists {
// Copy item's slice so operations on this slice (delta
// compression) won't interfere with the object we return.
d = copyDeltas(d)
}
return d, exists, nil
}
// Checks if the queue is closed
func (f *DeltaFIFO) IsClosed() bool {
f.closedLock.Lock()
defer f.closedLock.Unlock()
if f.closed {
return true
}
return false
}
// Pop blocks until an item is added to the queue, and then returns it. If
// multiple items are ready, they are returned in the order in which they were
// added/updated. The item is removed from the queue (and the store) before it
// is returned, so if you don't successfully process it, you need to add it back
// with AddIfNotPresent().
// process function is called under lock, so it is safe to update data structures
// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
// may return an instance of ErrRequeue with a nested error to indicate the current
// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
//
// Pop returns a 'Deltas', which has a complete list of all the things
// that happened to the object (deltas) while it was sitting in the queue.
func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
f.lock.Lock()
defer f.lock.Unlock()
for {
for len(f.queue) == 0 {
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
// When Close() is called, the f.closed is set and the condition is broadcasted.
// Which causes this loop to continue and return from the Pop().
if f.IsClosed() {
return nil, FIFOClosedError
}
f.cond.Wait()
}
id := f.queue[0]
f.queue = f.queue[1:]
item, ok := f.items[id]
if f.initialPopulationCount > 0 {
f.initialPopulationCount--
}
if !ok {
// Item may have been deleted subsequently.
continue
}
delete(f.items, id)
err := process(item)
if e, ok := err.(ErrRequeue); ok {
f.addIfNotPresent(id, item)
err = e.Err
}
// Don't need to copyDeltas here, because we're transferring
// ownership to the caller.
return item, err
}
}
// Replace will delete the contents of 'f', using instead the given map.
// 'f' takes ownership of the map, you should not reference the map again
// after calling this function. f's queue is reset, too; upon return, it
// will contain the items in the map, in no particular order.
func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
f.lock.Lock()
defer f.lock.Unlock()
keys := make(sets.String, len(list))
for _, item := range list {
key, err := f.KeyOf(item)
if err != nil {
return KeyError{item, err}
}
keys.Insert(key)
if err := f.queueActionLocked(Sync, item); err != nil {
return fmt.Errorf("couldn't enqueue object: %v", err)
}
}
if f.knownObjects == nil {
// Do deletion detection against our own list.
for k, oldItem := range f.items {
if keys.Has(k) {
continue
}
var deletedObj interface{}
if n := oldItem.Newest(); n != nil {
deletedObj = n.Object
}
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
return err
}
}
if !f.populated {
f.populated = true
f.initialPopulationCount = len(list)
}
return nil
}
// Detect deletions not already in the queue.
// TODO(lavalamp): This may be racy-- we aren't properly locked
// with knownObjects. Unproven.
knownKeys := f.knownObjects.ListKeys()
queuedDeletions := 0
for _, k := range knownKeys {
if keys.Has(k) {
continue
}
deletedObj, exists, err := f.knownObjects.GetByKey(k)
if err != nil {
deletedObj = nil
glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
} else if !exists {
deletedObj = nil
glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
}
queuedDeletions++
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
return err
}
}
if !f.populated {
f.populated = true
f.initialPopulationCount = len(list) + queuedDeletions
}
return nil
}
// Resync will send a sync event for each item
func (f *DeltaFIFO) Resync() error {
f.lock.Lock()
defer f.lock.Unlock()
if f.knownObjects == nil {
return nil
}
keys := f.knownObjects.ListKeys()
for _, k := range keys {
if err := f.syncKeyLocked(k); err != nil {
return err
}
}
return nil
}
func (f *DeltaFIFO) syncKey(key string) error {
f.lock.Lock()
defer f.lock.Unlock()
return f.syncKeyLocked(key)
}
func (f *DeltaFIFO) syncKeyLocked(key string) error {
obj, exists, err := f.knownObjects.GetByKey(key)
if err != nil {
glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
return nil
} else if !exists {
glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
return nil
}
// If we are doing Resync() and there is already an event queued for that object,
// we ignore the Resync for it. This is to avoid the race, in which the resync
// comes with the previous value of object (since queueing an event for the object
// doesn't trigger changing the underlying store <knownObjects>).
id, err := f.KeyOf(obj)
if err != nil {
return KeyError{obj, err}
}
if len(f.items[id]) > 0 {
return nil
}
if err := f.queueActionLocked(Sync, obj); err != nil {
return fmt.Errorf("couldn't queue object: %v", err)
}
return nil
}
// A KeyListerGetter is anything that knows how to list its keys and look up by key.
type KeyListerGetter interface {
KeyLister
KeyGetter
}
// A KeyLister is anything that knows how to list its keys.
type KeyLister interface {
ListKeys() []string
}
// A KeyGetter is anything that knows how to get the value stored under a given key.
type KeyGetter interface {
GetByKey(key string) (interface{}, bool, error)
}
// DeltaCompressor is an algorithm that removes redundant changes.
type DeltaCompressor interface {
Compress(Deltas) Deltas
}
// DeltaCompressorFunc should remove redundant changes; but changes that
// are redundant depend on one's desired semantics, so this is an
// injectable function.
//
// DeltaCompressorFunc adapts a raw function to be a DeltaCompressor.
type DeltaCompressorFunc func(Deltas) Deltas
// Compress just calls dc.
func (dc DeltaCompressorFunc) Compress(d Deltas) Deltas {
return dc(d)
}
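// The variable below is an illustrative sketch only and is not part of the
// upstream client-go file: a DeltaCompressorFunc that keeps just the newest
// delta for an object and drops the intermediate history. Whether discarding
// history is safe depends entirely on what the consumer needs to observe.
var exampleKeepNewestOnly DeltaCompressorFunc = func(d Deltas) Deltas {
	// Newest returns nil for an empty Deltas slice; return nil so the
	// queue removes the (empty) entry instead of keeping a zero-length list.
	if n := d.Newest(); n != nil {
		return Deltas{*n}
	}
	return nil
}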
// DeltaType is the type of a change (addition, deletion, etc)
type DeltaType string
const (
Added DeltaType = "Added"
Updated DeltaType = "Updated"
Deleted DeltaType = "Deleted"
// The other types are obvious. You'll get Sync deltas when:
// * A watch expires/errors out and a new list/watch cycle is started.
// * You've turned on periodic syncs.
// (Anything that triggers DeltaFIFO's Replace() method.)
Sync DeltaType = "Sync"
)
// Delta is the type stored by a DeltaFIFO. It tells you what change
// happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
// state of the object before it was deleted.
type Delta struct {
Type DeltaType
Object interface{}
}
// Deltas is a list of one or more 'Delta's to an individual object.
// The oldest delta is at index 0, the newest delta is the last one.
type Deltas []Delta
// Oldest is a convenience function that returns the oldest delta, or
// nil if there are no deltas.
func (d Deltas) Oldest() *Delta {
if len(d) > 0 {
return &d[0]
}
return nil
}
// Newest is a convenience function that returns the newest delta, or
// nil if there are no deltas.
func (d Deltas) Newest() *Delta {
if n := len(d); n > 0 {
return &d[n-1]
}
return nil
}
// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
// the objects in the slice. This allows Get/List to return an object that we
// know won't be clobbered by a subsequent call to a delta compressor.
func copyDeltas(d Deltas) Deltas {
d2 := make(Deltas, len(d))
copy(d2, d)
return d2
}
// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
// an object was deleted but the watch deletion event was missed. In this
// case we don't know the final "resting" state of the object, so there's
// a chance the included `Obj` is stale.
type DeletedFinalStateUnknown struct {
Key string
Obj interface{}
}
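For orientation, here is a minimal, self-contained sketch of how the DeltaFIFO above is typically driven; it is not part of the vendored file. A producer calls Add/Update/Delete, and a consumer Pops a Deltas slice describing every change the object saw while queued. The pod type and the key function are illustrative assumptions.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// pod is a stand-in object keyed by name; real callers typically queue
// Kubernetes API objects and key them with cache.MetaNamespaceKeyFunc.
type pod struct{ name string }

func main() {
	// No delta compressor and no known-object lister (both may be nil).
	f := cache.NewDeltaFIFO(func(obj interface{}) (string, error) {
		return obj.(pod).name, nil
	}, nil, nil)

	f.Add(pod{name: "web"})
	f.Update(pod{name: "web"})
	f.Delete(pod{name: "web"})

	// Pop hands the consumer a cache.Deltas slice: Added, Updated, Deleted.
	_, err := f.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Println(d.Type, d.Object)
		}
		return nil
	})
	if err != nil {
		fmt.Println("pop error:", err)
	}
}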

24
vendor/k8s.io/client-go/tools/cache/doc.go generated vendored Normal file

@ -0,0 +1,24 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cache is a client-side caching mechanism. It is useful for
// reducing the number of server calls you'd otherwise need to make.
// Reflector watches a server and updates a Store. Two stores are provided;
// one that simply caches objects (for example, to allow a scheduler to
// list currently available nodes), and one that additionally acts as
// a FIFO queue (for example, to allow a scheduler to process incoming
// pods).
package cache // import "k8s.io/client-go/tools/cache"

208
vendor/k8s.io/client-go/tools/cache/expiration_cache.go generated vendored Normal file

@ -0,0 +1,208 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/clock"
)
// ExpirationCache implements the store interface
// 1. All entries are automatically time stamped on insert
// a. The key is computed based off the original item/keyFunc
// b. The value inserted under that key is the timestamped item
// 2. Expiration happens lazily on read based on the expiration policy
// a. No item can be inserted into the store while we're expiring
// *any* item in the cache.
// 3. Time-stamps are stripped off unexpired entries before return
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.
type ExpirationCache struct {
cacheStorage ThreadSafeStore
keyFunc KeyFunc
clock clock.Clock
expirationPolicy ExpirationPolicy
// expirationLock is a write lock used to guarantee that we don't clobber
// newly inserted objects because of a stale expiration timestamp comparison
expirationLock sync.Mutex
}
// ExpirationPolicy dictates when an object expires. Currently only abstracted out
// so unittests don't rely on the system clock.
type ExpirationPolicy interface {
IsExpired(obj *timestampedEntry) bool
}
// TTLPolicy implements a ttl based ExpirationPolicy.
type TTLPolicy struct {
// >0: Expire entries with an age > ttl
// <=0: Don't expire any entry
Ttl time.Duration
// Clock used to calculate ttl expiration
Clock clock.Clock
}
// IsExpired returns true if the given object is older than the ttl, or it can't
// determine its age.
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
}
// timestampedEntry is the only type allowed in an ExpirationCache.
type timestampedEntry struct {
obj interface{}
timestamp time.Time
}
// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
item, _ := c.cacheStorage.Get(key)
if tsEntry, ok := item.(*timestampedEntry); ok {
return tsEntry, true
}
return nil, false
}
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// already expired. It holds a write lock across deletion.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
// Prevent all inserts from the time we deem an item as "expired" to when we
// delete it, so an un-expired item doesn't sneak in under the same key, just
// before the Delete.
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
timestampedItem, exists := c.getTimestampedEntry(key)
if !exists {
return nil, false
}
if c.expirationPolicy.IsExpired(timestampedItem) {
glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
c.cacheStorage.Delete(key)
return nil, false
}
return timestampedItem.obj, true
}
// GetByKey returns the item stored under the key, or sets exists=false.
func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
obj, exists := c.getOrExpire(key)
return obj, exists, nil
}
// Get returns unexpired items. It purges the cache of expired items in the
// process.
func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
key, err := c.keyFunc(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
obj, exists := c.getOrExpire(key)
return obj, exists, nil
}
// List retrieves a list of unexpired items. It purges the cache of expired
// items in the process.
func (c *ExpirationCache) List() []interface{} {
items := c.cacheStorage.List()
list := make([]interface{}, 0, len(items))
for _, item := range items {
obj := item.(*timestampedEntry).obj
if key, err := c.keyFunc(obj); err != nil {
list = append(list, obj)
} else if obj, exists := c.getOrExpire(key); exists {
list = append(list, obj)
}
}
return list
}
// ListKeys returns a list of all keys in the expiration cache.
func (c *ExpirationCache) ListKeys() []string {
return c.cacheStorage.ListKeys()
}
// Add timestamps an item and inserts it into the cache, overwriting entries
// that might exist under the same key.
func (c *ExpirationCache) Add(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
return nil
}
// Update has not been implemented yet for lack of a use case, so this method
// simply calls `Add`. This effectively refreshes the timestamp.
func (c *ExpirationCache) Update(obj interface{}) error {
return c.Add(obj)
}
// Delete removes an item from the cache.
func (c *ExpirationCache) Delete(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Delete(key)
return nil
}
// Replace will convert all items in the given list to TimestampedEntries
// before attempting the replace operation. The replace operation will
// delete the contents of the ExpirationCache `c`.
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
items := map[string]interface{}{}
ts := c.clock.Now()
for _, item := range list {
key, err := c.keyFunc(item)
if err != nil {
return KeyError{item, err}
}
items[key] = &timestampedEntry{item, ts}
}
c.cacheStorage.Replace(items, resourceVersion)
return nil
}
// Resync will touch all objects to put them into the processing queue
func (c *ExpirationCache) Resync() error {
return c.cacheStorage.Resync()
}
// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
return &ExpirationCache{
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
keyFunc: keyFunc,
clock: clock.RealClock{},
expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
}
}
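As a usage note, the following is a minimal sketch of the TTL store built by NewTTLStore above; it is not part of the vendored file, and the string key function and ten-second TTL are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Cache string tokens, keyed by their own value, for ten seconds.
	store := cache.NewTTLStore(func(obj interface{}) (string, error) {
		return obj.(string), nil
	}, 10*time.Second)

	if err := store.Add("token-abc"); err != nil {
		fmt.Println("add failed:", err)
		return
	}

	// Reads lazily expire stale entries; within the TTL the item is returned.
	if obj, exists, _ := store.GetByKey("token-abc"); exists {
		fmt.Println("cached:", obj)
	}
}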


@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
)
type fakeThreadSafeMap struct {
ThreadSafeStore
deletedKeys chan<- string
}
func (c *fakeThreadSafeMap) Delete(key string) {
if c.deletedKeys != nil {
c.ThreadSafeStore.Delete(key)
c.deletedKeys <- key
}
}
type FakeExpirationPolicy struct {
NeverExpire sets.String
RetrieveKeyFunc KeyFunc
}
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
key, _ := p.RetrieveKeyFunc(obj)
return !p.NeverExpire.Has(key)
}
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
return &ExpirationCache{
cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys},
keyFunc: keyFunc,
clock: cacheClock,
expirationPolicy: expirationPolicy,
}
}


@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
// FakeCustomStore lets you define custom functions for store operations
type FakeCustomStore struct {
AddFunc func(obj interface{}) error
UpdateFunc func(obj interface{}) error
DeleteFunc func(obj interface{}) error
ListFunc func() []interface{}
ListKeysFunc func() []string
GetFunc func(obj interface{}) (item interface{}, exists bool, err error)
GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
ReplaceFunc func(list []interface{}, resourceVersion string) error
ResyncFunc func() error
}
// Add calls the custom Add function if defined
func (f *FakeCustomStore) Add(obj interface{}) error {
if f.AddFunc != nil {
return f.AddFunc(obj)
}
return nil
}
// Update calls the custom Update function if defined
func (f *FakeCustomStore) Update(obj interface{}) error {
if f.UpdateFunc != nil {
return f.UpdateFunc(obj)
}
return nil
}
// Delete calls the custom Delete function if defined
func (f *FakeCustomStore) Delete(obj interface{}) error {
if f.DeleteFunc != nil {
return f.DeleteFunc(obj)
}
return nil
}
// List calls the custom List function if defined
func (f *FakeCustomStore) List() []interface{} {
if f.ListFunc != nil {
return f.ListFunc()
}
return nil
}
// ListKeys calls the custom ListKeys function if defined
func (f *FakeCustomStore) ListKeys() []string {
if f.ListKeysFunc != nil {
return f.ListKeysFunc()
}
return nil
}
// Get calls the custom Get function if defined
func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
if f.GetFunc != nil {
return f.GetFunc(obj)
}
return nil, false, nil
}
// GetByKey calls the custom GetByKey function if defined
func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) {
if f.GetByKeyFunc != nil {
return f.GetByKeyFunc(key)
}
return nil, false, nil
}
// Replace calls the custom Replace function if defined
func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error {
if f.ReplaceFunc != nil {
return f.ReplaceFunc(list, resourceVersion)
}
return nil
}
// Resync calls the custom Resync function if defined
func (f *FakeCustomStore) Resync() error {
if f.ResyncFunc != nil {
return f.ResyncFunc()
}
return nil
}
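A short sketch of how such a fake is typically used in tests follows; it is not part of the vendored file. Only the method the test cares about is stubbed, and everything else falls back to the no-op defaults. The key and return value are illustrative.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Stub only GetByKey; Add, Delete, List, etc. fall through to the no-ops.
	store := &cache.FakeCustomStore{
		GetByKeyFunc: func(key string) (interface{}, bool, error) {
			if key == "default/web" {
				return "fake object for default/web", true, nil
			}
			return nil, false, nil
		},
	}

	obj, exists, err := store.GetByKey("default/web")
	fmt.Println(obj, exists, err) // fake object for default/web true <nil>
}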

358
vendor/k8s.io/client-go/tools/cache/fifo.go generated vendored Normal file

@ -0,0 +1,358 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
)
// PopProcessFunc is passed to Pop() method of Queue interface.
// It is supposed to process the element popped from the queue.
type PopProcessFunc func(interface{}) error
// ErrRequeue may be returned by a PopProcessFunc to safely requeue
// the current item. The value of Err will be returned from Pop.
type ErrRequeue struct {
// Err is returned by the Pop function
Err error
}
var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue")
func (e ErrRequeue) Error() string {
if e.Err == nil {
return "the popped item should be requeued without returning an error"
}
return e.Err.Error()
}
// Queue is exactly like a Store, but has a Pop() method too.
type Queue interface {
Store
// Pop blocks until it has something to process.
// It returns the object that was processed and the result of processing.
// The PopProcessFunc may return an ErrRequeue{...} to indicate the item
// should be requeued before releasing the lock on the queue.
Pop(PopProcessFunc) (interface{}, error)
// AddIfNotPresent adds a value previously
// returned by Pop back into the queue as long
// as nothing else (presumably more recent)
// has since been added.
AddIfNotPresent(interface{}) error
// Return true if the first batch of items has been popped
HasSynced() bool
// Close queue
Close()
}
// Helper function for popping from Queue.
// WARNING: Do NOT use this function in non-test code to avoid races
// unless you really really really really know what you are doing.
func Pop(queue Queue) interface{} {
var result interface{}
queue.Pop(func(obj interface{}) error {
result = obj
return nil
})
return result
}
// FIFO receives adds and updates from a Reflector, and puts them in a queue for
// FIFO order processing. If multiple adds/updates of a single item happen while
// an item is in the queue before it has been processed, it will only be
// processed once, and when it is processed, the most recent version will be
// processed. This can't be done with a channel.
//
// FIFO solves this use case:
// * You want to process every object (exactly) once.
// * You want to process the most recent version of the object when you process it.
// * You do not want to process deleted objects, they should be removed from the queue.
// * You do not want to periodically reprocess objects.
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
lock sync.RWMutex
cond sync.Cond
// We depend on the property that items in the set are in the queue and vice versa.
items map[string]interface{}
queue []string
// populated is true if the first batch of items inserted by Replace() has been populated
// or Delete/Add/Update was called first.
populated bool
// initialPopulationCount is the number of items inserted by the first call of Replace()
initialPopulationCount int
// keyFunc is used to make the key used for queued item insertion and retrieval, and
// should be deterministic.
keyFunc KeyFunc
// Indication the queue is closed.
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
// Currently, not used to gate any of CRUD operations.
closed bool
closedLock sync.Mutex
}
var (
_ = Queue(&FIFO{}) // FIFO is a Queue
)
// Close the queue.
func (f *FIFO) Close() {
f.closedLock.Lock()
defer f.closedLock.Unlock()
f.closed = true
f.cond.Broadcast()
}
// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
// or the first batch of items inserted by Replace() has been popped.
func (f *FIFO) HasSynced() bool {
f.lock.Lock()
defer f.lock.Unlock()
return f.populated && f.initialPopulationCount == 0
}
// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *FIFO) Add(obj interface{}) error {
id, err := f.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
if _, exists := f.items[id]; !exists {
f.queue = append(f.queue, id)
}
f.items[id] = obj
f.cond.Broadcast()
return nil
}
// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (f *FIFO) AddIfNotPresent(obj interface{}) error {
id, err := f.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.addIfNotPresent(id, obj)
return nil
}
// addIfNotPresent assumes the fifo lock is already held and adds the provided
// item to the queue under id if it does not already exist.
func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
f.populated = true
if _, exists := f.items[id]; exists {
return
}
f.queue = append(f.queue, id)
f.items[id] = obj
f.cond.Broadcast()
}
// Update is the same as Add in this implementation.
func (f *FIFO) Update(obj interface{}) error {
return f.Add(obj)
}
// Delete removes an item. It doesn't add it to the queue, because
// this implementation assumes the consumer only cares about the objects,
// not the order in which they were created/added.
func (f *FIFO) Delete(obj interface{}) error {
id, err := f.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
delete(f.items, id)
return err
}
// List returns a list of all the items.
func (f *FIFO) List() []interface{} {
f.lock.RLock()
defer f.lock.RUnlock()
list := make([]interface{}, 0, len(f.items))
for _, item := range f.items {
list = append(list, item)
}
return list
}
// ListKeys returns a list of all the keys of the objects currently
// in the FIFO.
func (f *FIFO) ListKeys() []string {
f.lock.RLock()
defer f.lock.RUnlock()
list := make([]string, 0, len(f.items))
for key := range f.items {
list = append(list, key)
}
return list
}
// Get returns the requested item, or sets exists=false.
func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
key, err := f.keyFunc(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
return f.GetByKey(key)
}
// GetByKey returns the requested item, or sets exists=false.
func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
f.lock.RLock()
defer f.lock.RUnlock()
item, exists = f.items[key]
return item, exists, nil
}
// Checks if the queue is closed
func (f *FIFO) IsClosed() bool {
f.closedLock.Lock()
defer f.closedLock.Unlock()
if f.closed {
return true
}
return false
}
// Pop waits until an item is ready and processes it. If multiple items are
// ready, they are returned in the order in which they were added/updated.
// The item is removed from the queue (and the store) before it is processed,
// so if you don't successfully process it, it should be added back with
// AddIfNotPresent(). process function is called under lock, so it is safe to
// update data structures in it that need to be in sync with the queue.
func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
f.lock.Lock()
defer f.lock.Unlock()
for {
for len(f.queue) == 0 {
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
// When Close() is called, the f.closed is set and the condition is broadcasted.
// Which causes this loop to continue and return from the Pop().
if f.IsClosed() {
return nil, FIFOClosedError
}
f.cond.Wait()
}
id := f.queue[0]
f.queue = f.queue[1:]
if f.initialPopulationCount > 0 {
f.initialPopulationCount--
}
item, ok := f.items[id]
if !ok {
// Item may have been deleted subsequently.
continue
}
delete(f.items, id)
err := process(item)
if e, ok := err.(ErrRequeue); ok {
f.addIfNotPresent(id, item)
err = e.Err
}
return item, err
}
}
// Replace will delete the contents of 'f', using instead the given map.
// 'f' takes ownership of the map, you should not reference the map again
// after calling this function. f's queue is reset, too; upon return, it
// will contain the items in the map, in no particular order.
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
items := map[string]interface{}{}
for _, item := range list {
key, err := f.keyFunc(item)
if err != nil {
return KeyError{item, err}
}
items[key] = item
}
f.lock.Lock()
defer f.lock.Unlock()
if !f.populated {
f.populated = true
f.initialPopulationCount = len(items)
}
f.items = items
f.queue = f.queue[:0]
for id := range items {
f.queue = append(f.queue, id)
}
if len(f.queue) > 0 {
f.cond.Broadcast()
}
return nil
}
// Resync will touch all objects to put them into the processing queue
func (f *FIFO) Resync() error {
f.lock.Lock()
defer f.lock.Unlock()
inQueue := sets.NewString()
for _, id := range f.queue {
inQueue.Insert(id)
}
for id := range f.items {
if !inQueue.Has(id) {
f.queue = append(f.queue, id)
}
}
if len(f.queue) > 0 {
f.cond.Broadcast()
}
return nil
}
// NewFIFO returns a Store which can be used to queue up items to
// process.
func NewFIFO(keyFunc KeyFunc) *FIFO {
f := &FIFO{
items: map[string]interface{}{},
queue: []string{},
keyFunc: keyFunc,
}
f.cond.L = &f.lock
return f
}
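Finally, a minimal sketch of the plain FIFO above; it is not part of the vendored file. Repeated Adds for the same key are coalesced, so the consumer only ever sees the most recent version. The pod type and key function are illustrative assumptions.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type pod struct {
	name    string
	version int
}

func main() {
	f := cache.NewFIFO(func(obj interface{}) (string, error) {
		return obj.(pod).name, nil
	})

	f.Add(pod{name: "web", version: 1})
	f.Add(pod{name: "web", version: 2}) // same key: overwritten, not re-queued

	// cache.Pop is the test-only helper defined above.
	fmt.Printf("%+v\n", cache.Pop(f)) // {name:web version:2}
}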

Some files were not shown because too many files have changed in this diff.