Merge branch 'master' into feature/poll-multi-tags

feature/poll-multi-tags
Karolis Rusenas 2018-11-12 17:25:37 +00:00
commit bd00613e7e
16 changed files with 274 additions and 61 deletions

View File

@@ -1,9 +1,9 @@
apiVersion: v1
name: keel
description: Open source, tool for automating Kubernetes deployment updates. Keel is stateless, robust and lightweight.
-version: 0.7.1
+version: 0.7.2
# Note that we use appVersion to get images tag, so make sure this is correct.
-appVersion: 0.10.0
+appVersion: 0.12.0
keywords:
- kubernetes deployment
- helm release

View File

@@ -4,7 +4,7 @@
image:
repository: keelhq/keel
-tag: 0.10.0
+tag: 0.12.0
pullPolicy: IfNotPresent
# Enable insecure registries

View File

@@ -225,7 +225,15 @@ func setupProviders(k8sImplementer kubernetes.Implementer, sender notification.S
"error": err,
}).Fatal("main.setupProviders: failed to create kubernetes provider")
}
-go k8sProvider.Start()
+go func() {
+err := k8sProvider.Start()
+if err != nil {
+log.WithFields(log.Fields{
+"error": err,
+}).Fatal("kubernetes provider stopped with an error")
+}
+}()
enabledProviders = append(enabledProviders, k8sProvider)
if os.Getenv(EnvHelmProvider) == "1" {
@@ -233,7 +241,15 @@ func setupProviders(k8sImplementer kubernetes.Implementer, sender notification.S
helmImplementer := helm.NewHelmImplementer(tillerAddr)
helmProvider := helm.NewProvider(helmImplementer, sender, approvalsManager)
-go helmProvider.Start()
+go func() {
+err := helmProvider.Start()
+if err != nil {
+log.WithFields(log.Fields{
+"error": err,
+}).Fatal("helm provider stopped with an error")
+}
+}()
enabledProviders = append(enabledProviders, helmProvider)
}
@@ -253,7 +269,15 @@ func setupTriggers(ctx context.Context, providers provider.Providers, approvalsM
ApprovalManager: approvalsManager,
})
-go whs.Start()
+go func() {
+err := whs.Start()
+if err != nil {
+log.WithFields(log.Fields{
+"error": err,
+"port": types.KeelDefaultPort,
+}).Fatal("trigger server stopped")
+}
+}()
// checking whether pubsub (GCR) trigger is enabled
if os.Getenv(EnvTriggerPubSub) != "" {
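Editor's note: the same wrap-and-log pattern is applied three times in this hunk. As a purely illustrative aside (not part of the commit), the repetition could be folded into a small helper along these lines; the helper name startAndReport is hypothetical, and only the logrus calls already used in main.go are assumed:

package main

import (
	"errors"
	"time"

	log "github.com/sirupsen/logrus"
)

// startAndReport runs a blocking Start-style func in a goroutine and logs
// fatally if it ever returns an error, mirroring the pattern above.
// Illustrative sketch only; this helper does not exist in the change itself.
func startAndReport(name string, start func() error) {
	go func() {
		if err := start(); err != nil {
			log.WithFields(log.Fields{
				"component": name,
				"error":     err,
			}).Fatal("component stopped with an error")
		}
	}()
}

func main() {
	// Demo usage with a fake component that fails shortly after starting.
	startAndReport("demo provider", func() error {
		time.Sleep(100 * time.Millisecond)
		return errors.New("boom")
	})
	time.Sleep(time.Second) // give the goroutine time to fail in this demo
}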

View File

@@ -71,7 +71,7 @@ spec:
containers:
- name: keel
# Note that we use appVersion to get images tag.
-image: "keelhq/keel:0.10.0"
+image: "keelhq/keel:0.12.0"
imagePullPolicy: IfNotPresent
command: ["/bin/keel"]
env:
@@ -79,22 +79,16 @@
valueFrom:
fieldRef:
fieldPath: metadata.namespace
-# Enable polling
-- name: POLL
-value: "1"
# Enable GCR with pub/sub support
- name: PROJECT_ID
-value: ""
+value: ""
- name: PUBSUB
-value: "1"
+value: "" # Set to '1' or 'true' to enable GCR pubsub
# Enable AWS ECR
- name: AWS_ACCESS_KEY_ID
value: ""
- name: AWS_SECRET_ACCESS_KEY
-valueFrom:
-secretKeyRef:
-name: keel-aws-ecr
-key: secretAccessKey
+value: ""
- name: AWS_REGION
value: ""
# Enable webhook endpoint

View File

@@ -71,7 +71,7 @@
containers:
- name: keel
# Note that we use appVersion to get images tag.
-image: "keelhq/keel:0.10.0"
+image: "keelhq/keel:0.12.0"
imagePullPolicy: IfNotPresent
command: ["/bin/keel"]
env:
@@ -86,15 +86,12 @@
- name: PROJECT_ID
value: ""
- name: PUBSUB
-value: "1"
+value: "" # Set to '1' or 'true' to enable GCR pubsub
# Enable AWS ECR
- name: AWS_ACCESS_KEY_ID
value: ""
- name: AWS_SECRET_ACCESS_KEY
-valueFrom:
-secretKeyRef:
-name: keel-aws-ecr
-key: secretAccessKey
+value: ""
- name: AWS_REGION
value: ""
# Enable webhook endpoint

View File

@@ -141,7 +141,7 @@
containers:
- name: keel
# Note that we use appVersion to get images tag.
-image: "keelhq/keel:0.10.0"
+image: "keelhq/keel:0.12.0"
imagePullPolicy: IfNotPresent
command: ["/bin/keel"]
env:
@@ -149,22 +149,16 @@
valueFrom:
fieldRef:
fieldPath: metadata.namespace
-# Enable polling
-- name: POLL
-value: "1"
# Enable GCR with pub/sub support
- name: PROJECT_ID
value: ""
- name: PUBSUB
-value: "1"
+value: "" # Set to '1' or 'true' to enable GCR pubsub
# Enable AWS ECR
- name: AWS_ACCESS_KEY_ID
value: ""
- name: AWS_SECRET_ACCESS_KEY
-valueFrom:
-secretKeyRef:
-name: keel-aws-ecr
-key: secretAccessKey
+value: ""
- name: AWS_REGION
value: ""
# Enable webhook endpoint

View File

@@ -141,7 +141,7 @@ spec:
containers:
- name: keel
# Note that we use appVersion to get images tag.
-image: "keelhq/keel:0.10.0"
+image: "keelhq/keel:0.12.0"
imagePullPolicy: IfNotPresent
command: ["/bin/keel"]
env:
@@ -149,22 +149,16 @@
valueFrom:
fieldRef:
fieldPath: metadata.namespace
-# Enable polling
-- name: POLL
-value: "1"
# Enable GCR with pub/sub support
- name: PROJECT_ID
value: ""
- name: PUBSUB
-value: "1"
+value: "" # Set to '1' or 'true' to enable GCR pubsub
# Enable AWS ECR
- name: AWS_ACCESS_KEY_ID
value: ""
- name: AWS_SECRET_ACCESS_KEY
-valueFrom:
-secretKeyRef:
-name: keel-aws-ecr
-key: secretAccessKey
+value: ""
- name: AWS_REGION
value: ""
# Enable webhook endpoint

View File

@@ -16,7 +16,7 @@
spec:
serviceAccountName: keel
containers:
-- image: keelhq/keel:0.9.7
+- image: keelhq/keel:0.12.0
imagePullPolicy: Always
env:
- name: PUBSUB
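Editor's note: across these manifests the PUBSUB default changes from "1" to an empty string, the POLL variable is dropped where it was present, and the AWS secretKeyRef is replaced with an empty value, so none of these integrations are on by default any more. A minimal sketch of how such env toggles are typically consumed on the Go side follows; the envEnabled helper is hypothetical, while the accepted values "1" and "true" come from the manifest comment above and the bare os.Getenv(EnvTriggerPubSub) != "" check is visible in main.go earlier in this diff:

package main

import (
	"fmt"
	"os"
)

// envEnabled treats "1" or "true" as on, anything else (including unset or
// empty, the new manifest default) as off. Illustrative only.
func envEnabled(name string) bool {
	v := os.Getenv(name)
	return v == "1" || v == "true"
}

func main() {
	fmt.Println("GCR pub/sub trigger enabled:", envEnabled("PUBSUB"))
}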

View File

@@ -0,0 +1,59 @@
package policy
import "testing"
func TestGlobPolicy_ShouldUpdate(t *testing.T) {
type fields struct {
policy string
pattern string
}
type args struct {
current string
new string
}
tests := []struct {
name string
fields fields
args args
want bool
wantErr bool
}{
{
name: "test glob latest",
fields: fields{pattern: "latest"},
args: args{current: "latest", new: "latest"},
want: true,
wantErr: false,
},
{
name: "test glob without *",
fields: fields{pattern: "latest"},
args: args{current: "latest", new: "earliest"},
want: false,
wantErr: false,
},
{
name: "test glob with *",
fields: fields{pattern: "lat*"},
args: args{current: "latest", new: "latest"},
want: true,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &GlobPolicy{
policy: tt.fields.policy,
pattern: tt.fields.pattern,
}
got, err := p.ShouldUpdate(tt.args.current, tt.args.new)
if (err != nil) != tt.wantErr {
t.Errorf("GlobPolicy.ShouldUpdate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("GlobPolicy.ShouldUpdate() = %v, want %v", got, tt.want)
}
})
}
}
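Editor's note: for readers unfamiliar with glob semantics, the cases above reduce to: a pattern without a wildcard must match the tag exactly ("latest" vs "earliest" fails), while a trailing * matches any suffix ("lat*" matches "latest"). The standalone sketch below reproduces that behaviour with the standard library's path.Match; it is only an illustration, not the GlobPolicy implementation itself:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Mirror the test cases: exact pattern, non-matching tag, wildcard pattern.
	checks := []struct{ pattern, tag string }{
		{"latest", "latest"},
		{"latest", "earliest"},
		{"lat*", "latest"},
	}
	for _, c := range checks {
		ok, err := path.Match(c.pattern, c.tag)
		fmt.Printf("pattern=%q tag=%q match=%v err=%v\n", c.pattern, c.tag, ok, err)
	}
}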

View File

@@ -30,15 +30,20 @@ func (np *NilPolicy) ShouldUpdate(c, n string) (bool, error) { return false, nil
func (np *NilPolicy) Name() string { return "nil policy" }
func (np *NilPolicy) Type() PolicyType { return PolicyTypeNone }
-// GetPolicyFromLabels - gets policy from k8s labels
-func GetPolicyFromLabels(labels map[string]string) Policy {
+// GetPolicyFromLabelsOrAnnotations - gets policy from k8s labels or annotations
+func GetPolicyFromLabelsOrAnnotations(labels map[string]string, annotations map[string]string) Policy {
-policyName, ok := getPolicyFromLabels(labels)
+policyNameA, ok := getPolicyFromLabels(annotations)
+if ok {
+return GetPolicy(policyNameA, &Options{MatchTag: getMatchTag(annotations)})
+}
+policyNameL, ok := getPolicyFromLabels(labels)
if !ok {
return &NilPolicy{}
}
-return GetPolicy(policyName, &Options{MatchTag: getMatchTag(labels)})
+return GetPolicy(policyNameL, &Options{MatchTag: getMatchTag(labels)})
// switch policyName {
// case "all", "major", "minor", "patch":
@@ -89,7 +94,7 @@ func GetPolicy(policyName string, options *Options) Policy {
return NewForcePolicy(options.MatchTag)
}
-log.Infof("unknown policy '%s'", policyName)
+log.Infof("policy.GetPolicy: unknown policy '%s', please check your configuration", policyName)
return &NilPolicy{}
}

View File

@@ -91,3 +91,39 @@ func TestGetPolicy(t *testing.T) {
})
}
}
func TestGetPolicyFromLabelsOrAnnotations(t *testing.T) {
type args struct {
labels map[string]string
annotations map[string]string
}
tests := []struct {
name string
args args
want Policy
}{
{
name: "annotations policy",
args: args{
labels: map[string]string{"foo": "bar"},
annotations: map[string]string{types.KeelPolicyLabel: "all"},
},
want: NewSemverPolicy(SemverPolicyTypeAll),
},
{
name: "annotations overides labels",
args: args{
labels: map[string]string{types.KeelPolicyLabel: "patch"},
annotations: map[string]string{types.KeelPolicyLabel: "all"},
},
want: NewSemverPolicy(SemverPolicyTypeAll),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := GetPolicyFromLabelsOrAnnotations(tt.args.labels, tt.args.annotations); !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetPolicyFromLabelsOrAnnotations() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -187,7 +187,7 @@ func (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {
"error": err,
"release": release.Name,
"namespace": release.Namespace,
-}).Error("provider.helm: failed to get config for release")
+}).Debug("provider.helm: failed to get config for release")
continue
}

View File

@@ -131,14 +131,14 @@ func (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {
for _, gr := range p.cache.Values() {
labels := gr.GetLabels()
+annotations := gr.GetAnnotations()
// ignoring unlabelled deployments
-plc := policy.GetPolicyFromLabels(labels)
+plc := policy.GetPolicyFromLabelsOrAnnotations(labels, annotations)
if plc.Type() == policy.PolicyTypeNone {
continue
}
-annotations := gr.GetAnnotations()
schedule, ok := annotations[types.KeelPollScheduleAnnotation]
if ok {
_, err := cron.Parse(schedule)
@@ -156,7 +156,7 @@ func (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {
}
// trigger type, we only care for "poll" type triggers
-trigger := policies.GetTriggerPolicy(labels)
+trigger := policies.GetTriggerPolicy(labels, annotations)
secrets := gr.GetImagePullSecrets()
images := gr.GetImages()
for _, img := range images {
@@ -345,10 +345,11 @@ func (p *Provider) createUpdatePlans(repo *types.Repository) ([]*UpdatePlan, err
for _, resource := range p.cache.Values() {
labels := resource.GetLabels()
+annotations := resource.GetAnnotations()
-plc := policy.GetPolicyFromLabels(labels)
+plc := policy.GetPolicyFromLabelsOrAnnotations(labels, annotations)
if plc.Type() == policy.PolicyTypeNone {
-log.Debugf("no policy defined, skipping: %s, labels: %s", resource.Identifier, labels)
+log.Debugf("no policy defined, skipping: %s, labels: %s, annotations: %s", resource.Identifier, labels, annotations)
continue
}

View File

@@ -266,6 +266,102 @@ func TestGetImpacted(t *testing.T) {
t.Errorf("couldn't find expected deployment in impacted deployment list")
}
}
func TestGetImpactedPolicyAnnotations(t *testing.T) {
fp := &fakeImplementer{}
fp.namespaces = &v1.NamespaceList{
Items: []v1.Namespace{
v1.Namespace{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{Name: "xxxx"},
v1.NamespaceSpec{},
v1.NamespaceStatus{},
},
},
}
deps := []*apps_v1.Deployment{
{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "dep-1",
Namespace: "xxxx",
Annotations: map[string]string{types.KeelPolicyLabel: "all"},
Labels: map[string]string{"foo": "all"},
},
apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
},
},
},
},
apps_v1.DeploymentStatus{},
},
{
meta_v1.TypeMeta{},
meta_v1.ObjectMeta{
Name: "dep-2",
Namespace: "xxxx",
Labels: map[string]string{"whatever": "all"},
},
apps_v1.DeploymentSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
Image: "gcr.io/v2-namespace/hello-world:1.1.1",
},
},
},
},
},
apps_v1.DeploymentStatus{},
},
}
grs := MustParseGRS(deps)
grc := &k8s.GenericResourceCache{}
grc.Add(grs...)
provider, err := NewProvider(fp, &fakeSender{}, approver(), grc)
if err != nil {
t.Fatalf("failed to get provider: %s", err)
}
// creating "new version" event
repo := &types.Repository{
Name: "gcr.io/v2-namespace/hello-world",
Tag: "1.1.2",
}
plans, err := provider.createUpdatePlans(repo)
if err != nil {
t.Errorf("failed to get deployments: %s", err)
}
if len(plans) != 1 {
t.Fatalf("expected to find 1 deployment update plan but found %d", len(plans))
}
found := false
for _, c := range plans[0].Resource.Containers() {
containerImageName := versionreg.ReplaceAllString(c.Image, "")
if containerImageName == repo.Name {
found = true
}
}
if !found {
t.Errorf("couldn't find expected deployment in impacted deployment list")
}
}
func TestPrereleaseGetImpactedA(t *testing.T) {
// test scenario when we have two deployments, one with pre-release tag

View File

@@ -2,7 +2,6 @@ package http
import (
"encoding/json"
-"fmt"
"net/http"
"time"
@@ -99,18 +98,18 @@ func (s *TriggerServer) registryNotificationHandler(resp http.ResponseWriter, re
return
}
+log.WithFields(log.Fields{
+"event": rn,
+}).Debug("registryNotificationHandler: received event, looking for a push tag")
for _, e := range rn.Events {
if e.Action != "push" {
// ignoring non-push events
-resp.WriteHeader(200)
-return
+continue
}
if e.Target.Tag == "" {
-resp.WriteHeader(http.StatusBadRequest)
-fmt.Fprintf(resp, "tag cannot be empty")
-return
+continue
}
dockerURL := e.Request.Host + "/" + e.Target.Repository
@@ -122,10 +121,16 @@ func (s *TriggerServer) registryNotificationHandler(resp http.ResponseWriter, re
event.Repository.Tag = e.Target.Tag
event.Repository.Digest = e.Target.Digest
+log.WithFields(log.Fields{
+"action": e.Action,
+"tag": e.Target.Tag,
+"repository": dockerURL,
+"digest": e.Target.Digest,
+}).Debug("registryNotificationHandler: got registry notification, processing")
s.trigger(event)
newRegistryNotificationWebhooksCounter.With(prometheus.Labels{"image": event.Repository.Name}).Inc()
}
}
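Editor's note: the handler now iterates over all events instead of aborting on the first non-push or untagged one, and it only reads a handful of fields from the Docker registry notification envelope (events[].action, target.repository, target.tag, target.digest and request.host). A hand-rolled payload along the following lines can be used to exercise it; the port and URL path below are placeholders, not the trigger server's actual route, and the digest is a dummy value:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Minimal notification covering only the fields the handler above reads.
const notification = `{
  "events": [
    {
      "action": "push",
      "target": {
        "repository": "v2-namespace/hello-world",
        "tag": "1.1.2",
        "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
      },
      "request": {
        "host": "registry.example.com"
      }
    }
  ]
}`

func main() {
	// Placeholder URL: adjust host, port and path to wherever the trigger
	// server exposes its registry notification endpoint in your setup.
	resp, err := http.Post("http://localhost:9300/registry-notification", "application/json", strings.NewReader(notification))
	if err != nil {
		fmt.Println("post failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}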

View File

@@ -6,10 +6,18 @@ import (
// GetTriggerPolicy - checks for trigger label, if not set - returns
// default trigger type
-func GetTriggerPolicy(labels map[string]string) types.TriggerType {
+func GetTriggerPolicy(labels map[string]string, annotations map[string]string) types.TriggerType {
+triggerAnn, ok := annotations[types.KeelTriggerLabel]
+if ok {
+return types.ParseTrigger(triggerAnn)
+}
+// checking labels
trigger, ok := labels[types.KeelTriggerLabel]
if ok {
return types.ParseTrigger(trigger)
}
return types.TriggerTypeDefault
}
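Editor's note: a standalone sketch of the precedence introduced here: an annotation wins over a label with the same key, and the default trigger applies when neither is set. The key string "keel.sh/trigger" and the plain string results below are simplified stand-ins for types.KeelTriggerLabel, types.ParseTrigger and types.TriggerType, not the package's actual values:

package main

import "fmt"

const triggerKey = "keel.sh/trigger" // stand-in for types.KeelTriggerLabel

// triggerFrom mirrors GetTriggerPolicy's lookup order: annotation first,
// then label, then the default.
func triggerFrom(labels, annotations map[string]string) string {
	if t, ok := annotations[triggerKey]; ok {
		return t
	}
	if t, ok := labels[triggerKey]; ok {
		return t
	}
	return "default"
}

func main() {
	labels := map[string]string{triggerKey: "default"}
	annotations := map[string]string{triggerKey: "poll"}
	fmt.Println(triggerFrom(labels, annotations)) // prints "poll": the annotation takes precedence
}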