Build with kube 1.24 (#1)
* Build with kubernetes 1.24 libraries * Try to resolve circle build error * Fix build warnings * Update test parser to use modern command install method * Attempt to pass helm chart testing * Correct testing kube version * Update go version used by the drone ci * Remove beta versions from templetespull/706/head
parent
7e4255ab45
commit
bc14e01e9d
|
@ -6,7 +6,7 @@ jobs:
|
|||
build:
|
||||
docker:
|
||||
# specify the version
|
||||
- image: circleci/golang:1.14.2
|
||||
- image: golang:1.20.1
|
||||
|
||||
#### TEMPLATE_NOTE: go expects specific checkout path representing url
|
||||
#### expecting it in the form of
|
||||
|
@ -34,7 +34,7 @@ jobs:
|
|||
shellcheck -x .test/e2e-kind.sh
|
||||
lint-charts:
|
||||
docker:
|
||||
- image: quay.io/helmpack/chart-testing:v3.3.1
|
||||
- image: quay.io/helmpack/chart-testing:v3.7.1
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
|
@ -47,9 +47,9 @@ jobs:
|
|||
machine: true
|
||||
environment:
|
||||
CHART_TESTING_IMAGE: quay.io/helmpack/chart-testing
|
||||
CHART_TESTING_TAG: v3.3.1
|
||||
CHART_TESTING_TAG: v3.7.1
|
||||
CHARTS_REPO: https://github.com/keel-hq/keel
|
||||
K8S_VERSION: v1.11.3
|
||||
K8S_VERSION: v1.24.7
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
|
|
|
@ -7,12 +7,12 @@ workspace:
|
|||
|
||||
steps:
|
||||
- name: unit-test
|
||||
image: golang:1.14.2
|
||||
image: golang:1.20.1
|
||||
commands:
|
||||
- make test
|
||||
|
||||
- name: build
|
||||
image: golang:1.14.2
|
||||
image: golang:1.20.1
|
||||
commands:
|
||||
- make install
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM golang:1.14.2
|
||||
FROM golang:1.20.1
|
||||
COPY . /go/src/github.com/keel-hq/keel
|
||||
WORKDIR /go/src/github.com/keel-hq/keel
|
||||
RUN make install
|
||||
|
|
2
Makefile
2
Makefile
|
@ -52,7 +52,7 @@ aarch64:
|
|||
arm: build-arm fetch-certs armhf aarch64
|
||||
|
||||
test:
|
||||
go get github.com/mfridman/tparse
|
||||
go install github.com/mfridman/tparse@latest
|
||||
go test -json -v `go list ./... | egrep -v /tests` -cover | tparse -all -smallscreen
|
||||
|
||||
build:
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
apiVersion: v1
|
||||
name: keel
|
||||
description: Open source, tool for automating Kubernetes deployment updates. Keel is stateless, robust and lightweight.
|
||||
version: 0.9.11
|
||||
version: 1.0.0
|
||||
# Note that we use appVersion to get images tag, so make sure this is correct.
|
||||
appVersion: 0.16.1
|
||||
keywords:
|
||||
|
|
|
@ -63,15 +63,7 @@ spec:
|
|||
value: "false"
|
||||
{{- end }}
|
||||
{{- if .Values.helmProvider.enabled }}
|
||||
{{- if eq .Values.helmProvider.version "v2" }}
|
||||
# Enable/disable Helm provider
|
||||
- name: HELM_PROVIDER
|
||||
value: "true"
|
||||
- name: TILLER_NAMESPACE
|
||||
value: "{{ .Values.helmProvider.tillerNamespace }}"
|
||||
- name: TILLER_ADDRESS
|
||||
value: "{{ .Values.helmProvider.tillerAddress }}"
|
||||
{{- else if eq .Values.helmProvider.version "v3" }}
|
||||
{{- if eq .Values.helmProvider.version "v3" }}
|
||||
# Enable/disable Helm provider
|
||||
- name: HELM3_PROVIDER
|
||||
value: "true"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
{{- if .Values.podDisruptionBudget.enabled }}
|
||||
apiVersion: policy/v1beta1
|
||||
apiVersion: policy/v1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ template "keel.fullname" . }}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
|
@ -13,7 +12,6 @@ import (
|
|||
kingpin "gopkg.in/alecthomas/kingpin.v2"
|
||||
kube "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/helm/pkg/helm/portforwarder"
|
||||
|
||||
"github.com/keel-hq/keel/approvals"
|
||||
"github.com/keel-hq/keel/bot"
|
||||
|
@ -30,7 +28,6 @@ import (
|
|||
"github.com/keel-hq/keel/internal/k8s"
|
||||
"github.com/keel-hq/keel/internal/workgroup"
|
||||
"github.com/keel-hq/keel/provider"
|
||||
"github.com/keel-hq/keel/provider/helm"
|
||||
"github.com/keel-hq/keel/provider/helm3"
|
||||
"github.com/keel-hq/keel/provider/kubernetes"
|
||||
"github.com/keel-hq/keel/registry"
|
||||
|
@ -59,23 +56,19 @@ import (
|
|||
_ "github.com/keel-hq/keel/bot/slack"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
// importing to ensure correct dependencies
|
||||
_ "helm.sh/helm/v3/pkg/action"
|
||||
)
|
||||
|
||||
// gcloud pubsub related config
|
||||
const (
|
||||
EnvTriggerPubSub = "PUBSUB" // set to 1 or something to enable pub/sub trigger
|
||||
EnvTriggerPoll = "POLL" // set to 0 to disable poll trigger
|
||||
EnvProjectID = "PROJECT_ID"
|
||||
EnvClusterName = "CLUSTER_NAME"
|
||||
EnvDataDir = "XDG_DATA_HOME"
|
||||
EnvHelmProvider = "HELM_PROVIDER" // helm provider
|
||||
EnvHelmTillerAddress = "TILLER_ADDRESS" // helm provider
|
||||
EnvHelmTillerNamespace = "TILLER_NAMESPACE" // helm provider
|
||||
EnvHelm3Provider = "HELM3_PROVIDER" // helm3 provider
|
||||
EnvUIDir = "UI_DIR"
|
||||
EnvTriggerPubSub = "PUBSUB" // set to 1 or something to enable pub/sub trigger
|
||||
EnvTriggerPoll = "POLL" // set to 0 to disable poll trigger
|
||||
EnvProjectID = "PROJECT_ID"
|
||||
EnvClusterName = "CLUSTER_NAME"
|
||||
EnvDataDir = "XDG_DATA_HOME"
|
||||
EnvHelm3Provider = "HELM3_PROVIDER" // helm3 provider
|
||||
EnvUIDir = "UI_DIR"
|
||||
|
||||
// EnvDefaultDockerRegistryCfg - default registry configuration that can be passed into
|
||||
// keel for polling trigger
|
||||
|
@ -321,45 +314,6 @@ func setupProviders(opts *ProviderOpts) (providers provider.Providers) {
|
|||
|
||||
enabledProviders = append(enabledProviders, k8sProvider)
|
||||
|
||||
if os.Getenv(EnvHelmProvider) == "1" || os.Getenv(EnvHelmProvider) == "true" {
|
||||
|
||||
var tillerAddr string
|
||||
|
||||
if os.Getenv(EnvHelmTillerAddress) != "" {
|
||||
tillerAddr = os.Getenv(EnvHelmTillerAddress)
|
||||
log.Infof("Tiller address specified: %s", tillerAddr)
|
||||
} else {
|
||||
tillerNamespace := "kube-system"
|
||||
if os.Getenv(EnvHelmTillerNamespace) != "" {
|
||||
tillerNamespace = os.Getenv(EnvHelmTillerNamespace)
|
||||
}
|
||||
|
||||
tillerTunnel, err := portforwarder.New(tillerNamespace, opts.k8sClient, opts.config)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
}).Fatal("failed to setup Tiller tunnel")
|
||||
}
|
||||
|
||||
tillerAddr = fmt.Sprintf("127.0.0.1:%d", tillerTunnel.Local)
|
||||
log.Infof("created local tunnel using local port: '%d'", tillerTunnel.Local)
|
||||
}
|
||||
|
||||
helmImplementer := helm.NewHelmImplementer(tillerAddr)
|
||||
helmProvider := helm.NewProvider(helmImplementer, opts.sender, opts.approvalsManager)
|
||||
|
||||
go func() {
|
||||
err := helmProvider.Start()
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
}).Fatal("helm provider stopped with an error")
|
||||
}
|
||||
}()
|
||||
|
||||
enabledProviders = append(enabledProviders, helmProvider)
|
||||
}
|
||||
|
||||
if os.Getenv(EnvHelm3Provider) == "1" || os.Getenv(EnvHelm3Provider) == "true" {
|
||||
helm3Implementer := helm3.NewHelm3Implementer()
|
||||
helm3Provider := helm3.NewProvider(helm3Implementer, opts.sender, opts.approvalsManager)
|
||||
|
|
|
@ -242,7 +242,7 @@ spec:
|
|||
---
|
||||
# Source: keel/templates/pod-disruption-budget.yaml
|
||||
|
||||
apiVersion: policy/v1beta1
|
||||
apiVersion: policy/v1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: keel
|
||||
|
|
224
go.mod
224
go.mod
|
@ -1,74 +1,196 @@
|
|||
module github.com/keel-hq/keel
|
||||
|
||||
go 1.14
|
||||
go 1.19
|
||||
|
||||
replace (
|
||||
k8s.io/api => k8s.io/api v0.16.10
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.16.10
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.16.10
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.16.10
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.16.10
|
||||
k8s.io/client-go => k8s.io/client-go v0.16.10
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.16.10
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.16.10
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.16.10
|
||||
k8s.io/component-base => k8s.io/component-base v0.16.10
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.16.10
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.16.10
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.16.10
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.16.10
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.16.10
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.16.10
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.16.10
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.16.10
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.16.10
|
||||
k8s.io/metrics => k8s.io/metrics v0.16.10
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.16.10
|
||||
k8s.io/api => k8s.io/api v0.24.10
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.10
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.24.10
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.24.10
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.10
|
||||
k8s.io/client-go => k8s.io/client-go v0.24.10
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.10
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.10
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.24.10
|
||||
k8s.io/component-base => k8s.io/component-base v0.24.10
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.24.10
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.10
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.10
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.10
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.10
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.10
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.24.10
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.24.10
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.10
|
||||
k8s.io/metrics => k8s.io/metrics v0.24.10
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.10
|
||||
)
|
||||
|
||||
replace (
|
||||
helm.sh/helm/v3 => helm.sh/helm/v3 v3.1.2
|
||||
k8s.io/helm => k8s.io/helm v2.16.7+incompatible
|
||||
)
|
||||
replace helm.sh/helm/v3 => helm.sh/helm/v3 v3.9.4
|
||||
|
||||
replace k8s.io/kubernetes => k8s.io/kubernetes v1.16.10
|
||||
replace k8s.io/kubernetes => k8s.io/kubernetes v1.24.10
|
||||
|
||||
require (
|
||||
cloud.google.com/go/pubsub v1.4.0
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
|
||||
github.com/aws/aws-sdk-go v1.31.10
|
||||
github.com/daneharrigan/hipchat v0.0.0-20170512185232-835dc879394a
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/docker/distribution v2.7.1+incompatible
|
||||
github.com/ghodss/yaml v1.0.0
|
||||
github.com/golang/protobuf v1.4.2
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/gorilla/mux v1.7.4
|
||||
github.com/jinzhu/gorm v1.9.12
|
||||
github.com/jmoiron/sqlx v1.2.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/mfridman/tparse v0.8.2 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/jinzhu/gorm v1.9.16
|
||||
github.com/nlopes/slack v0.6.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/rubenv/sql-migrate v0.0.0-20200429072036-ae26b214fa43 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/rusenask/cron v1.1.0
|
||||
github.com/rusenask/docker-registry-client v0.0.0-20200210164146-049272422097
|
||||
github.com/ryanuber/go-glob v1.0.0
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/stretchr/testify v1.5.1
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/tbruyelle/hipchat-go v0.0.0-20170717082847-35aebc99209a
|
||||
github.com/urfave/negroni v1.0.0
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9
|
||||
google.golang.org/api v0.26.0
|
||||
google.golang.org/grpc v1.29.1
|
||||
golang.org/x/net v0.5.0
|
||||
google.golang.org/api v0.103.0
|
||||
google.golang.org/grpc v1.51.0
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
helm.sh/helm/v3 v3.0.0-00010101000000-000000000000
|
||||
k8s.io/api v0.17.2
|
||||
k8s.io/apimachinery v0.17.2
|
||||
k8s.io/cli-runtime v0.17.2
|
||||
k8s.io/client-go v0.17.2
|
||||
k8s.io/helm v0.0.0-00010101000000-000000000000
|
||||
sigs.k8s.io/yaml v1.1.0
|
||||
helm.sh/helm/v3 v3.9.4
|
||||
k8s.io/api v0.24.10
|
||||
k8s.io/apimachinery v0.24.10
|
||||
k8s.io/cli-runtime v0.24.10
|
||||
k8s.io/client-go v0.24.10
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.107.0 // indirect
|
||||
cloud.google.com/go/compute v1.12.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.1 // indirect
|
||||
cloud.google.com/go/iam v0.8.0 // indirect
|
||||
cloud.google.com/go/kms v1.8.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/BurntSushi/toml v1.0.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.1.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.3 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
|
||||
github.com/containerd/containerd v1.6.6 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v20.10.17+incompatible // indirect
|
||||
github.com/docker/docker v20.10.17+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/go-errors/errors v1.0.1 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.0.2 // indirect
|
||||
github.com/go-logr/logr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.3.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.5 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.13.6 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lib/pq v1.10.6 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/rubenv/sql-migrate v1.2.0 // indirect
|
||||
github.com/russross/blackfriday v1.5.2 // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/cobra v1.4.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.4.0 // indirect
|
||||
golang.org/x/term v0.4.0 // indirect
|
||||
golang.org/x/text v0.6.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.24.2 // indirect
|
||||
k8s.io/apiserver v0.24.10 // indirect
|
||||
k8s.io/component-base v0.24.10 // indirect
|
||||
k8s.io/klog/v2 v2.60.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
|
||||
k8s.io/kubectl v0.24.2 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
oras.land/oras-go v1.2.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.11.4 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
)
|
||||
|
|
|
@ -2,7 +2,7 @@ package k8s
|
|||
|
||||
import (
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
v1beta1 "k8s.io/api/batch/v1beta1"
|
||||
batch_v1 "k8s.io/api/batch/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
|
@ -54,10 +54,10 @@ func updateDaemonsetSetContainer(s *apps_v1.DaemonSet, index int, image string)
|
|||
|
||||
// cron
|
||||
|
||||
func getCronJobIdentifier(s *v1beta1.CronJob) string {
|
||||
func getCronJobIdentifier(s *batch_v1.CronJob) string {
|
||||
return "cronjob/" + s.Namespace + "/" + s.Name
|
||||
}
|
||||
|
||||
func updateCronJobContainer(s *v1beta1.CronJob, index int, image string) {
|
||||
func updateCronJobContainer(s *batch_v1.CronJob, index int, image string) {
|
||||
s.Spec.JobTemplate.Spec.Template.Spec.Containers[index].Image = image
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"strings"
|
||||
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
v1beta1 "k8s.io/api/batch/v1beta1"
|
||||
batch_v1 "k8s.io/api/batch/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
|
@ -41,7 +41,7 @@ func NewGenericResource(obj interface{}) (*GenericResource, error) {
|
|||
switch obj.(type) {
|
||||
case *apps_v1.Deployment, *apps_v1.StatefulSet, *apps_v1.DaemonSet:
|
||||
// ok
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
// ok
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported resource type: %v", reflect.TypeOf(obj).Kind())
|
||||
|
@ -79,7 +79,7 @@ func (r *GenericResource) DeepCopy() *GenericResource {
|
|||
gr.obj = obj.DeepCopy()
|
||||
case *apps_v1.DaemonSet:
|
||||
gr.obj = obj.DeepCopy()
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
gr.obj = obj.DeepCopy()
|
||||
}
|
||||
|
||||
|
@ -95,7 +95,7 @@ func (r *GenericResource) GetIdentifier() string {
|
|||
return getStatefulSetIdentifier(obj)
|
||||
case *apps_v1.DaemonSet:
|
||||
return getDaemonsetSetIdentifier(obj)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return getCronJobIdentifier(obj)
|
||||
}
|
||||
return ""
|
||||
|
@ -110,7 +110,7 @@ func (r *GenericResource) GetName() string {
|
|||
return obj.GetName()
|
||||
case *apps_v1.DaemonSet:
|
||||
return obj.GetName()
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return obj.GetName()
|
||||
}
|
||||
return ""
|
||||
|
@ -125,7 +125,7 @@ func (r *GenericResource) GetNamespace() string {
|
|||
return obj.GetNamespace()
|
||||
case *apps_v1.DaemonSet:
|
||||
return obj.GetNamespace()
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return obj.GetNamespace()
|
||||
}
|
||||
return ""
|
||||
|
@ -140,7 +140,7 @@ func (r *GenericResource) Kind() string {
|
|||
return "statefulset"
|
||||
case *apps_v1.DaemonSet:
|
||||
return "daemonset"
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return "cronjob"
|
||||
}
|
||||
return ""
|
||||
|
@ -160,7 +160,7 @@ func (r *GenericResource) GetLabels() (labels map[string]string) {
|
|||
return getOrInitialise(obj.GetLabels())
|
||||
case *apps_v1.DaemonSet:
|
||||
return getOrInitialise(obj.GetLabels())
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return getOrInitialise(obj.GetLabels())
|
||||
}
|
||||
return
|
||||
|
@ -175,7 +175,7 @@ func (r *GenericResource) SetLabels(labels map[string]string) {
|
|||
obj.SetLabels(labels)
|
||||
case *apps_v1.DaemonSet:
|
||||
obj.SetLabels(labels)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
obj.SetLabels(labels)
|
||||
}
|
||||
}
|
||||
|
@ -189,7 +189,7 @@ func (r *GenericResource) GetSpecAnnotations() (annotations map[string]string) {
|
|||
return getOrInitialise(obj.Spec.Template.GetAnnotations())
|
||||
case *apps_v1.DaemonSet:
|
||||
return getOrInitialise(obj.Spec.Template.GetAnnotations())
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return getOrInitialise(obj.Spec.JobTemplate.GetAnnotations())
|
||||
}
|
||||
return
|
||||
|
@ -204,7 +204,7 @@ func (r *GenericResource) SetSpecAnnotations(annotations map[string]string) {
|
|||
obj.Spec.Template.SetAnnotations(annotations)
|
||||
case *apps_v1.DaemonSet:
|
||||
obj.Spec.Template.SetAnnotations(annotations)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
obj.Spec.JobTemplate.SetAnnotations(annotations)
|
||||
}
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ func (r *GenericResource) GetAnnotations() (annotations map[string]string) {
|
|||
return getOrInitialise(obj.GetAnnotations())
|
||||
case *apps_v1.DaemonSet:
|
||||
return getOrInitialise(obj.GetAnnotations())
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return getOrInitialise(obj.GetAnnotations())
|
||||
}
|
||||
return
|
||||
|
@ -240,7 +240,7 @@ func (r *GenericResource) SetAnnotations(annotations map[string]string) {
|
|||
obj.SetAnnotations(annotations)
|
||||
case *apps_v1.DaemonSet:
|
||||
obj.SetAnnotations(annotations)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
obj.SetAnnotations(annotations)
|
||||
}
|
||||
}
|
||||
|
@ -254,7 +254,7 @@ func (r *GenericResource) GetImagePullSecrets() (secrets []string) {
|
|||
return getImagePullSecrets(obj.Spec.Template.Spec.ImagePullSecrets)
|
||||
case *apps_v1.DaemonSet:
|
||||
return getImagePullSecrets(obj.Spec.Template.Spec.ImagePullSecrets)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return getImagePullSecrets(obj.Spec.JobTemplate.Spec.Template.Spec.ImagePullSecrets)
|
||||
}
|
||||
return
|
||||
|
@ -269,7 +269,7 @@ func (r *GenericResource) GetImages() (images []string) {
|
|||
return getContainerImages(obj.Spec.Template.Spec.Containers)
|
||||
case *apps_v1.DaemonSet:
|
||||
return getContainerImages(obj.Spec.Template.Spec.Containers)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return getContainerImages(obj.Spec.JobTemplate.Spec.Template.Spec.Containers)
|
||||
}
|
||||
return
|
||||
|
@ -284,7 +284,7 @@ func (r *GenericResource) Containers() (containers []core_v1.Container) {
|
|||
return obj.Spec.Template.Spec.Containers
|
||||
case *apps_v1.DaemonSet:
|
||||
return obj.Spec.Template.Spec.Containers
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return obj.Spec.JobTemplate.Spec.Template.Spec.Containers
|
||||
}
|
||||
return
|
||||
|
@ -299,7 +299,7 @@ func (r *GenericResource) UpdateContainer(index int, image string) {
|
|||
updateStatefulSetContainer(obj, index, image)
|
||||
case *apps_v1.DaemonSet:
|
||||
updateDaemonsetSetContainer(obj, index, image)
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
updateCronJobContainer(obj, index, image)
|
||||
}
|
||||
}
|
||||
|
@ -354,7 +354,7 @@ func (r *GenericResource) GetStatus() Status {
|
|||
AvailableReplicas: obj.Status.NumberAvailable,
|
||||
UnavailableReplicas: obj.Status.NumberUnavailable,
|
||||
}
|
||||
case *v1beta1.CronJob:
|
||||
case *batch_v1.CronJob:
|
||||
return Status{
|
||||
Replicas: int32(len(obj.Status.Active)),
|
||||
UpdatedReplicas: 0,
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
v1beta1 "k8s.io/api/batch/v1beta1"
|
||||
batch_v1 "k8s.io/api/batch/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
|
@ -33,9 +33,9 @@ func WatchDaemonSets(g *workgroup.Group, client *kubernetes.Clientset, log logru
|
|||
watch(g, client.AppsV1().RESTClient(), log, "daemonsets", new(apps_v1.DaemonSet), rs...)
|
||||
}
|
||||
|
||||
// WatchCronJobs creates a SharedInformer for v1beta1.CronJob and registers it with g.
|
||||
// WatchCronJobs creates a SharedInformer for batch_v1.CronJob and registers it with g.
|
||||
func WatchCronJobs(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
|
||||
watch(g, client.BatchV1beta1().RESTClient(), log, "cronjobs", new(v1beta1.CronJob), rs...)
|
||||
watch(g, client.BatchV1().RESTClient(), log, "cronjobs", new(batch_v1.CronJob), rs...)
|
||||
}
|
||||
|
||||
func watch(g *workgroup.Group, c cache.Getter, log logrus.FieldLogger, resource string, objType runtime.Object, rs ...cache.ResourceEventHandler) {
|
||||
|
|
|
@ -1,92 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/keel-hq/keel/pkg/store"
|
||||
"github.com/keel-hq/keel/types"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// namespace/release name:version
|
||||
func getIdentifier(plan *UpdatePlan) string {
|
||||
return fmt.Sprintf("%s/%s:%s", plan.Namespace, plan.Name, plan.NewVersion)
|
||||
}
|
||||
|
||||
func (p *Provider) checkForApprovals(event *types.Event, plans []*UpdatePlan) (approvedPlans []*UpdatePlan) {
|
||||
approvedPlans = []*UpdatePlan{}
|
||||
for _, plan := range plans {
|
||||
approved, err := p.isApproved(event, plan)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"release_name": plan.Name,
|
||||
"namespace": plan.Namespace,
|
||||
"version": plan.NewVersion,
|
||||
}).Error("provider.helm: failed to check approval status for deployment")
|
||||
continue
|
||||
}
|
||||
if approved {
|
||||
approvedPlans = append(approvedPlans, plan)
|
||||
}
|
||||
}
|
||||
return approvedPlans
|
||||
}
|
||||
|
||||
// updateComplete is called after we successfully update resource
|
||||
func (p *Provider) updateComplete(plan *UpdatePlan) error {
|
||||
return p.approvalManager.Archive(getIdentifier(plan))
|
||||
}
|
||||
|
||||
func (p *Provider) isApproved(event *types.Event, plan *UpdatePlan) (bool, error) {
|
||||
if plan.Config.Approvals == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
identifier := getIdentifier(plan)
|
||||
|
||||
// checking for existing approval
|
||||
existing, err := p.approvalManager.Get(identifier)
|
||||
if err != nil {
|
||||
if err == store.ErrRecordNotFound {
|
||||
|
||||
// if approval doesn't exist and trigger wasn't existing approval fulfillment -
|
||||
// create a new one, otherwise if several deployments rely on the same image, it would just be
|
||||
// requesting approvals in a loop
|
||||
if event.TriggerName == types.TriggerTypeApproval.String() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if plan.Config.ApprovalDeadline == 0 {
|
||||
plan.Config.ApprovalDeadline = types.KeelApprovalDeadlineDefault
|
||||
}
|
||||
|
||||
// creating new one
|
||||
approval := &types.Approval{
|
||||
Provider: types.ProviderTypeHelm,
|
||||
Identifier: identifier,
|
||||
Event: event,
|
||||
CurrentVersion: plan.CurrentVersion,
|
||||
NewVersion: plan.NewVersion,
|
||||
VotesRequired: plan.Config.Approvals,
|
||||
VotesReceived: 0,
|
||||
Rejected: false,
|
||||
Deadline: time.Now().Add(time.Duration(plan.Config.ApprovalDeadline) * time.Hour),
|
||||
}
|
||||
|
||||
approval.Message = fmt.Sprintf("New image is available for release %s/%s (%s).",
|
||||
plan.Namespace,
|
||||
plan.Name,
|
||||
approval.Delta(),
|
||||
)
|
||||
|
||||
return false, p.approvalManager.Create(approval)
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
return existing.Status() == types.ApprovalStatusApproved, nil
|
||||
}
|
|
@ -1,106 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/keel-hq/keel/types"
|
||||
"github.com/keel-hq/keel/util/image"
|
||||
|
||||
"k8s.io/helm/pkg/chartutil"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ErrKeelConfigNotFound - default error when keel configuration for chart is not defined
|
||||
var ErrKeelConfigNotFound = errors.New("keel configuration not found")
|
||||
|
||||
// getImages - get images from chart values
|
||||
func getImages(vals chartutil.Values) ([]*types.TrackedImage, error) {
|
||||
var images []*types.TrackedImage
|
||||
|
||||
keelCfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
if err == ErrPolicyNotSpecified {
|
||||
// nothing to do
|
||||
return images, nil
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
}).Error("provider.helm: failed to get keel configuration for release")
|
||||
// ignoring this release, no keel config found
|
||||
return nil, ErrKeelConfigNotFound
|
||||
}
|
||||
|
||||
for _, imageDetails := range keelCfg.Images {
|
||||
imageRef, err := parseImage(vals, &imageDetails)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"repository_name": imageDetails.RepositoryPath,
|
||||
"repository_tag": imageDetails.TagPath,
|
||||
}).Error("provider.helm: failed to parse image")
|
||||
continue
|
||||
}
|
||||
|
||||
trackedImage := &types.TrackedImage{
|
||||
Image: imageRef,
|
||||
PollSchedule: keelCfg.PollSchedule,
|
||||
Trigger: keelCfg.Trigger,
|
||||
Policy: keelCfg.Plc,
|
||||
}
|
||||
|
||||
if imageDetails.ImagePullSecret != "" {
|
||||
trackedImage.Secrets = append(trackedImage.Secrets, imageDetails.ImagePullSecret)
|
||||
}
|
||||
|
||||
images = append(images, trackedImage)
|
||||
}
|
||||
|
||||
return images, nil
|
||||
}
|
||||
|
||||
func getPlanValues(newVersion *types.Version, ref *image.Reference, imageDetails *ImageDetails) (path, value string) {
|
||||
// vals := make(map[string]string)
|
||||
// if tag is not supplied, then user specified full image name
|
||||
if imageDetails.TagPath == "" {
|
||||
return imageDetails.RepositoryPath, getUpdatedImage(ref, newVersion.String())
|
||||
}
|
||||
return imageDetails.TagPath, newVersion.String()
|
||||
}
|
||||
|
||||
func getUnversionedPlanValues(newTag string, ref *image.Reference, imageDetails *ImageDetails) (path, value string) {
|
||||
// if tag is not supplied, then user specified full image name
|
||||
if imageDetails.TagPath == "" {
|
||||
return imageDetails.RepositoryPath, getUpdatedImage(ref, newTag)
|
||||
}
|
||||
return imageDetails.TagPath, newTag
|
||||
}
|
||||
|
||||
func getUpdatedImage(ref *image.Reference, version string) string {
|
||||
// updating image
|
||||
if ref.Registry() == image.DefaultRegistryHostname {
|
||||
return fmt.Sprintf("%s:%s", ref.ShortName(), version)
|
||||
}
|
||||
return fmt.Sprintf("%s:%s", ref.Repository(), version)
|
||||
}
|
||||
|
||||
func parseImage(vals chartutil.Values, details *ImageDetails) (*image.Reference, error) {
|
||||
if details.RepositoryPath == "" {
|
||||
return nil, fmt.Errorf("repository name path cannot be empty")
|
||||
}
|
||||
|
||||
imageName, err := getValueAsString(vals, details.RepositoryPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// getting image tag
|
||||
imageTag, err := getValueAsString(vals, details.TagPath)
|
||||
if err != nil {
|
||||
// failed to find tag, returning anyway
|
||||
return image.Parse(imageName)
|
||||
}
|
||||
|
||||
return image.Parse(imageName + ":" + imageTag)
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/keel-hq/keel/internal/policy"
|
||||
"github.com/keel-hq/keel/types"
|
||||
"github.com/keel-hq/keel/util/image"
|
||||
"k8s.io/helm/pkg/chartutil"
|
||||
)
|
||||
|
||||
var chartValuesA = `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
|
||||
func mustParse(name string) *image.Reference {
|
||||
img, err := image.Parse(name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return img
|
||||
}
|
||||
|
||||
func Test_getImages(t *testing.T) {
|
||||
vals, _ := chartutil.ReadValues([]byte(chartValuesA))
|
||||
img, _ := image.Parse("gcr.io/v2-namespace/hello-world:1.1.0")
|
||||
|
||||
promVals, _ := chartutil.ReadValues([]byte(promChartValues))
|
||||
|
||||
type args struct {
|
||||
vals chartutil.Values
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []*types.TrackedImage
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "hello-world image",
|
||||
args: args{
|
||||
vals: vals,
|
||||
},
|
||||
want: []*types.TrackedImage{
|
||||
{
|
||||
Image: img,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Policy: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "prom config from https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/values.yaml",
|
||||
args: args{
|
||||
vals: promVals,
|
||||
},
|
||||
want: []*types.TrackedImage{
|
||||
{
|
||||
Image: mustParse("quay.io/prometheus/alertmanager:v0.16.2"),
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Policy: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
{
|
||||
Image: mustParse("quay.io/coreos/prometheus-operator:v0.29.0"),
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Policy: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
{
|
||||
Image: mustParse("quay.io/prometheus/prometheus:v2.7.2"),
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Policy: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := getImages(tt.args.vals)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("getImages() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("getImages() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var promChartValues = `# Default values for prometheus-operator.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# keel config
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: alertmanager.alertmanagerSpec.image.repository
|
||||
tag: alertmanager.alertmanagerSpec.image.tag
|
||||
- repository: prometheusOperator.image.repository
|
||||
tag: prometheusOperator.image.tag
|
||||
- repository: prometheus.prometheusSpec.image.repository
|
||||
tag: prometheus.prometheusSpec.image.tag
|
||||
|
||||
alertmanager:
|
||||
enabled: true
|
||||
alertmanagerSpec:
|
||||
image:
|
||||
repository: quay.io/prometheus/alertmanager
|
||||
tag: v0.16.2
|
||||
|
||||
prometheusOperator:
|
||||
enabled: true
|
||||
image:
|
||||
repository: quay.io/coreos/prometheus-operator
|
||||
tag: v0.29.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
prometheus:
|
||||
enabled: true
|
||||
prometheusSpec:
|
||||
image:
|
||||
repository: quay.io/prometheus/prometheus
|
||||
tag: v2.7.2
|
||||
`
|
|
@ -1,472 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/keel-hq/keel/approvals"
|
||||
"github.com/keel-hq/keel/internal/policy"
|
||||
"github.com/keel-hq/keel/types"
|
||||
"github.com/keel-hq/keel/util/image"
|
||||
|
||||
hapi_chart "k8s.io/helm/pkg/proto/hapi/chart"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/keel-hq/keel/extension/notification"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/helm/pkg/chartutil"
|
||||
"k8s.io/helm/pkg/helm"
|
||||
"k8s.io/helm/pkg/strvals"
|
||||
)
|
||||
|
||||
var helmVersionedUpdatesCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "helm_versioned_updates_total",
|
||||
Help: "How many versioned helm charts were updated, partitioned by chart name.",
|
||||
},
|
||||
[]string{"chart"},
|
||||
)
|
||||
|
||||
var helmUnversionedUpdatesCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "helm_unversioned_updates_total",
|
||||
Help: "How many unversioned helm charts were updated, partitioned by chart name.",
|
||||
},
|
||||
[]string{"chart"},
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(helmVersionedUpdatesCounter)
|
||||
prometheus.MustRegister(helmUnversionedUpdatesCounter)
|
||||
}
|
||||
|
||||
// ErrPolicyNotSpecified helm related errors
|
||||
var (
|
||||
ErrPolicyNotSpecified = errors.New("policy not specified")
|
||||
)
|
||||
|
||||
// Manager - high level interface into helm provider related data used by
|
||||
// triggers
|
||||
type Manager interface {
|
||||
Images() ([]*image.Reference, error)
|
||||
}
|
||||
|
||||
// ProviderName - helm provider name
|
||||
const ProviderName = "helm"
|
||||
|
||||
// DefaultUpdateTimeout - update timeout in seconds
|
||||
const DefaultUpdateTimeout = 300
|
||||
|
||||
// UpdatePlan - release update plan
|
||||
type UpdatePlan struct {
|
||||
Namespace string
|
||||
Name string
|
||||
|
||||
Config *KeelChartConfig
|
||||
|
||||
// chart
|
||||
Chart *hapi_chart.Chart
|
||||
|
||||
// values to update path=value
|
||||
Values map[string]string
|
||||
|
||||
// Current (last seen cluster version)
|
||||
CurrentVersion string
|
||||
// New version that's already in the deployment
|
||||
NewVersion string
|
||||
|
||||
// ReleaseNotes is a slice of combined release notes.
|
||||
ReleaseNotes []string
|
||||
}
|
||||
|
||||
// keel:
|
||||
// # keel policy (all/major/minor/patch/force)
|
||||
// policy: all
|
||||
// # trigger type, defaults to events such as pubsub, webhooks
|
||||
// trigger: poll
|
||||
// pollSchedule: "@every 2m"
|
||||
// # images to track and update
|
||||
// images:
|
||||
// - repository: image.repository
|
||||
// tag: image.tag
|
||||
|
||||
// Root - root element of the values yaml
|
||||
type Root struct {
|
||||
Keel KeelChartConfig `json:"keel"`
|
||||
}
|
||||
|
||||
// KeelChartConfig - keel related configuration taken from values.yaml
|
||||
type KeelChartConfig struct {
|
||||
Policy string `json:"policy"`
|
||||
MatchTag bool `json:"matchTag"`
|
||||
MatchPreRelease bool `json:"matchPreRelease"`
|
||||
Trigger types.TriggerType `json:"trigger"`
|
||||
PollSchedule string `json:"pollSchedule"`
|
||||
Approvals int `json:"approvals"` // Minimum required approvals
|
||||
ApprovalDeadline int `json:"approvalDeadline"` // Deadline in hours
|
||||
Images []ImageDetails `json:"images"`
|
||||
NotificationChannels []string `json:"notificationChannels"` // optional notification channels
|
||||
|
||||
Plc policy.Policy `json:"-"`
|
||||
}
|
||||
|
||||
// ImageDetails - image details
|
||||
type ImageDetails struct {
|
||||
RepositoryPath string `json:"repository"`
|
||||
TagPath string `json:"tag"`
|
||||
DigestPath string `json:"digest"`
|
||||
ReleaseNotes string `json:"releaseNotes"`
|
||||
ImagePullSecret string `json:"imagePullSecret"`
|
||||
}
|
||||
|
||||
// Provider - helm provider, responsible for managing release updates
|
||||
type Provider struct {
|
||||
implementer Implementer
|
||||
|
||||
sender notification.Sender
|
||||
|
||||
approvalManager approvals.Manager
|
||||
|
||||
events chan *types.Event
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
// NewProvider - create new Helm provider
|
||||
func NewProvider(implementer Implementer, sender notification.Sender, approvalManager approvals.Manager) *Provider {
|
||||
return &Provider{
|
||||
implementer: implementer,
|
||||
approvalManager: approvalManager,
|
||||
sender: sender,
|
||||
events: make(chan *types.Event, 100),
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetName - get provider name
|
||||
func (p *Provider) GetName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// Submit - submit event to provider
|
||||
func (p *Provider) Submit(event types.Event) error {
|
||||
p.events <- &event
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start - starts kubernetes provider, waits for events
|
||||
func (p *Provider) Start() error {
|
||||
return p.startInternal()
|
||||
}
|
||||
|
||||
// Stop - stops kubernetes provider
|
||||
func (p *Provider) Stop() {
|
||||
close(p.stop)
|
||||
}
|
||||
|
||||
// TrackedImages - returns tracked images from all releases that have keel configuration
|
||||
func (p *Provider) TrackedImages() ([]*types.TrackedImage, error) {
|
||||
var trackedImages []*types.TrackedImage
|
||||
|
||||
releaseList, err := p.implementer.ListReleases()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
releases := releaseList.GetReleases()
|
||||
|
||||
for _, release := range releases {
|
||||
// getting configuration
|
||||
|
||||
if release.Chart.Metadata.Name == "" {
|
||||
return nil, err
|
||||
}
|
||||
vals, err := values(release.Chart, release.Config)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"release": release.Name,
|
||||
"namespace": release.Namespace,
|
||||
}).Error("provider.helm: failed to get values.yaml for release")
|
||||
continue
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"release": release.Name,
|
||||
"namespace": release.Namespace,
|
||||
}).Debug("provider.helm: failed to get config for release")
|
||||
continue
|
||||
}
|
||||
|
||||
if cfg.PollSchedule == "" {
|
||||
cfg.PollSchedule = types.KeelPollDefaultSchedule
|
||||
}
|
||||
// used to check pod secrets
|
||||
selector := fmt.Sprintf("app=%s,release=%s", release.Chart.Metadata.Name, release.Name)
|
||||
|
||||
releaseImages, err := getImages(vals)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"release": release.Name,
|
||||
"namespace": release.Namespace,
|
||||
}).Error("provider.helm: failed to get images for release")
|
||||
continue
|
||||
}
|
||||
|
||||
for _, img := range releaseImages {
|
||||
img.Meta = map[string]string{
|
||||
"selector": selector,
|
||||
"helm.sh/chart": fmt.Sprintf("%s-%s", release.Chart.Metadata.Name, release.Chart.Metadata.Version),
|
||||
}
|
||||
img.Namespace = release.Namespace
|
||||
img.Provider = ProviderName
|
||||
trackedImages = append(trackedImages, img)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return trackedImages, nil
|
||||
}
|
||||
|
||||
func (p *Provider) startInternal() error {
|
||||
for {
|
||||
select {
|
||||
case event := <-p.events:
|
||||
err := p.processEvent(event)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"image": event.Repository.Name,
|
||||
"tag": event.Repository.Tag,
|
||||
}).Error("provider.helm: failed to process event")
|
||||
}
|
||||
case <-p.stop:
|
||||
log.Info("provider.helm: got shutdown signal, stopping...")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) processEvent(event *types.Event) (err error) {
|
||||
plans, err := p.createUpdatePlans(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
approved := p.checkForApprovals(event, plans)
|
||||
|
||||
return p.applyPlans(approved)
|
||||
}
|
||||
|
||||
func (p *Provider) createUpdatePlans(event *types.Event) ([]*UpdatePlan, error) {
|
||||
var plans []*UpdatePlan
|
||||
|
||||
releaseList, err := p.implementer.ListReleases()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
releases := releaseList.GetReleases()
|
||||
|
||||
for _, release := range releases {
|
||||
|
||||
// plan, update, err := checkRelease(newVersion, &event.Repository, release.Namespace, release.Name, release.Chart, release.Config)
|
||||
plan, update, err := checkRelease(&event.Repository, release.Namespace, release.Name, release.Chart, release.Config)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"name": release.Name,
|
||||
"namespace": release.Namespace,
|
||||
}).Error("provider.helm: failed to process versioned release")
|
||||
continue
|
||||
}
|
||||
if update {
|
||||
helmVersionedUpdatesCounter.With(prometheus.Labels{"chart": fmt.Sprintf("%s/%s", release.Namespace, release.Name)}).Inc()
|
||||
plans = append(plans, plan)
|
||||
}
|
||||
}
|
||||
|
||||
return plans, nil
|
||||
}
|
||||
|
||||
func (p *Provider) applyPlans(plans []*UpdatePlan) error {
|
||||
for _, plan := range plans {
|
||||
|
||||
p.sender.Send(types.EventNotification{
|
||||
ResourceKind: "chart",
|
||||
Identifier: fmt.Sprintf("%s/%s/%s", "chart", plan.Namespace, plan.Name),
|
||||
Name: "update release",
|
||||
Message: fmt.Sprintf("Preparing to update release %s/%s %s->%s (%s)", plan.Namespace, plan.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(mapToSlice(plan.Values), ", ")),
|
||||
CreatedAt: time.Now(),
|
||||
Type: types.NotificationPreReleaseUpdate,
|
||||
Level: types.LevelDebug,
|
||||
Channels: plan.Config.NotificationChannels,
|
||||
Metadata: map[string]string{
|
||||
"provider": p.GetName(),
|
||||
"namespace": plan.Namespace,
|
||||
"name": plan.Name,
|
||||
},
|
||||
})
|
||||
|
||||
err := updateHelmRelease(p.implementer, plan.Name, plan.Chart, plan.Values)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"name": plan.Name,
|
||||
"namespace": plan.Namespace,
|
||||
}).Error("provider.helm: failed to apply plan")
|
||||
|
||||
p.sender.Send(types.EventNotification{
|
||||
ResourceKind: "chart",
|
||||
Identifier: fmt.Sprintf("%s/%s/%s", "chart", plan.Namespace, plan.Name),
|
||||
Name: "update release",
|
||||
Message: fmt.Sprintf("Release update failed %s/%s %s->%s (%s), error: %s", plan.Namespace, plan.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(mapToSlice(plan.Values), ", "), err),
|
||||
CreatedAt: time.Now(),
|
||||
Type: types.NotificationReleaseUpdate,
|
||||
Level: types.LevelError,
|
||||
Channels: plan.Config.NotificationChannels,
|
||||
Metadata: map[string]string{
|
||||
"provider": p.GetName(),
|
||||
"namespace": plan.Namespace,
|
||||
"name": plan.Name,
|
||||
},
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
err = p.updateComplete(plan)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"name": plan.Name,
|
||||
"namespace": plan.Namespace,
|
||||
}).Debug("provider.helm: got error while resetting approvals counter after successful update")
|
||||
}
|
||||
|
||||
var msg string
|
||||
if len(plan.ReleaseNotes) == 0 {
|
||||
msg = fmt.Sprintf("Successfully updated release %s/%s %s->%s (%s)", plan.Namespace, plan.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(mapToSlice(plan.Values), ", "))
|
||||
} else {
|
||||
msg = fmt.Sprintf("Successfully updated release %s/%s %s->%s (%s). Release notes: %s", plan.Namespace, plan.Name, plan.CurrentVersion, plan.NewVersion, strings.Join(mapToSlice(plan.Values), ", "), strings.Join(plan.ReleaseNotes, ", "))
|
||||
}
|
||||
|
||||
p.sender.Send(types.EventNotification{
|
||||
ResourceKind: "chart",
|
||||
Identifier: fmt.Sprintf("%s/%s/%s", "chart", plan.Namespace, plan.Name),
|
||||
Name: "update release",
|
||||
Message: msg,
|
||||
CreatedAt: time.Now(),
|
||||
Type: types.NotificationReleaseUpdate,
|
||||
Level: types.LevelSuccess,
|
||||
Channels: plan.Config.NotificationChannels,
|
||||
Metadata: map[string]string{
|
||||
"provider": p.GetName(),
|
||||
"namespace": plan.Namespace,
|
||||
"name": plan.Name,
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateHelmRelease(implementer Implementer, releaseName string, chart *hapi_chart.Chart, overrideValues map[string]string) error {
|
||||
|
||||
overrideBts, err := convertToYaml(mapToSlice(overrideValues))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := implementer.UpdateReleaseFromChart(releaseName, chart,
|
||||
helm.UpdateValueOverrides(overrideBts),
|
||||
helm.UpgradeDryRun(false),
|
||||
helm.UpgradeRecreate(false),
|
||||
helm.UpgradeForce(true),
|
||||
helm.UpgradeDisableHooks(false),
|
||||
helm.UpgradeTimeout(DefaultUpdateTimeout),
|
||||
helm.ResetValues(false),
|
||||
helm.ReuseValues(true),
|
||||
helm.UpgradeWait(true))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"version": resp.Release.Version,
|
||||
"release": releaseName,
|
||||
}).Info("provider.helm: release updated")
|
||||
return nil
|
||||
}
|
||||
|
||||
func mapToSlice(values map[string]string) []string {
|
||||
converted := []string{}
|
||||
for k, v := range values {
|
||||
concat := k + "=" + v
|
||||
converted = append(converted, concat)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// parse
|
||||
func convertToYaml(values []string) ([]byte, error) {
|
||||
base := map[string]interface{}{}
|
||||
for _, value := range values {
|
||||
if err := strvals.ParseInto(value, base); err != nil {
|
||||
return []byte{}, fmt.Errorf("failed parsing --set data: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return yaml.Marshal(base)
|
||||
}
|
||||
|
||||
func getValueAsString(vals chartutil.Values, path string) (string, error) {
|
||||
valinterface, err := vals.PathValue(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
valString, ok := valinterface.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("failed to convert value to string")
|
||||
}
|
||||
|
||||
return valString, nil
|
||||
}
|
||||
|
||||
func values(chart *hapi_chart.Chart, config *hapi_chart.Config) (chartutil.Values, error) {
|
||||
return chartutil.CoalesceValues(chart, config)
|
||||
}
|
||||
|
||||
func getKeelConfig(vals chartutil.Values) (*KeelChartConfig, error) {
|
||||
yamlFull, err := vals.YAML()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get vals config, error: %s", err)
|
||||
}
|
||||
|
||||
var r Root
|
||||
// Default MatchPreRelease to true if not present (backward compatibility)
|
||||
r.Keel.MatchPreRelease = true
|
||||
err = yaml.Unmarshal([]byte(yamlFull), &r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse keel config: %s", err)
|
||||
}
|
||||
|
||||
if r.Keel.Policy == "" {
|
||||
return nil, ErrPolicyNotSpecified
|
||||
}
|
||||
|
||||
cfg := r.Keel
|
||||
|
||||
cfg.Plc = policy.GetPolicy(cfg.Policy, &policy.Options{MatchTag: cfg.MatchTag, MatchPreRelease: cfg.MatchPreRelease})
|
||||
|
||||
return &cfg, nil
|
||||
}
|
|
@ -1,726 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/keel-hq/keel/approvals"
|
||||
"github.com/keel-hq/keel/extension/notification"
|
||||
"github.com/keel-hq/keel/internal/policy"
|
||||
"github.com/keel-hq/keel/pkg/store/sql"
|
||||
"github.com/keel-hq/keel/types"
|
||||
"k8s.io/helm/pkg/chartutil"
|
||||
"k8s.io/helm/pkg/helm"
|
||||
"k8s.io/helm/pkg/proto/hapi/chart"
|
||||
hapi_release5 "k8s.io/helm/pkg/proto/hapi/release"
|
||||
rls "k8s.io/helm/pkg/proto/hapi/services"
|
||||
)
|
||||
|
||||
func newTestingUtils() (*sql.SQLStore, func()) {
|
||||
dir, err := ioutil.TempDir("", "whstoretest")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
tmpfn := filepath.Join(dir, "gorm.db")
|
||||
// defer
|
||||
store, err := sql.New(sql.Opts{DatabaseType: "sqlite3", URI: tmpfn})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
teardown := func() {
|
||||
os.RemoveAll(dir) // clean up
|
||||
}
|
||||
|
||||
return store, teardown
|
||||
}
|
||||
|
||||
func approver() (*approvals.DefaultManager, func()) {
|
||||
store, teardown := newTestingUtils()
|
||||
return approvals.New(&approvals.Opts{
|
||||
Store: store,
|
||||
}), teardown
|
||||
}
|
||||
|
||||
type fakeSender struct {
|
||||
sentEvent types.EventNotification
|
||||
}
|
||||
|
||||
func (s *fakeSender) Configure(cfg *notification.Config) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *fakeSender) Send(event types.EventNotification) error {
|
||||
s.sentEvent = event
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeImplementer struct {
|
||||
listReleasesResponse *rls.ListReleasesResponse
|
||||
|
||||
// updated info
|
||||
updatedRlsName string
|
||||
updatedChart *chart.Chart
|
||||
updatedOptions []helm.UpdateOption
|
||||
}
|
||||
|
||||
func (i *fakeImplementer) ListReleases(opts ...helm.ReleaseListOption) (*rls.ListReleasesResponse, error) {
|
||||
return i.listReleasesResponse, nil
|
||||
}
|
||||
|
||||
func (i *fakeImplementer) UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...helm.UpdateOption) (*rls.UpdateReleaseResponse, error) {
|
||||
i.updatedRlsName = rlsName
|
||||
i.updatedChart = chart
|
||||
i.updatedOptions = opts
|
||||
|
||||
return &rls.UpdateReleaseResponse{
|
||||
Release: &hapi_release5.Release{
|
||||
Version: 2,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// helper function to generate keel configuration
|
||||
func testingConfigYaml(cfg *KeelChartConfig) (vals chartutil.Values, err error) {
|
||||
root := &Root{Keel: *cfg}
|
||||
bts, err := yaml.Marshal(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return chartutil.ReadValues(bts)
|
||||
}
|
||||
|
||||
func TestGetChartPolicy(t *testing.T) {
|
||||
|
||||
chartVals := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
`
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: &chart.Chart{
|
||||
Values: &chart.Config{Raw: chartVals},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
},
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
releases, err := fakeImpl.ListReleases()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
policyFound := false
|
||||
|
||||
for _, release := range releases.Releases {
|
||||
|
||||
vals, err := values(release.Chart, release.Config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get values: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get image paths: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Plc.Name() == policy.SemverPolicyTypeAll.String() {
|
||||
policyFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !policyFound {
|
||||
t.Errorf("policy not found")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetChartPolicyFromProm(t *testing.T) {
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: &chart.Chart{
|
||||
Values: &chart.Config{Raw: promChartValues},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
},
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
releases, err := fakeImpl.ListReleases()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
policyFound := false
|
||||
|
||||
for _, release := range releases.Releases {
|
||||
|
||||
vals, err := values(release.Chart, release.Config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get values: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get image paths: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Plc.Name() == policy.SemverPolicyTypeAll.String() {
|
||||
policyFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !policyFound {
|
||||
t.Errorf("policy not found")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTrackedReleases(t *testing.T) {
|
||||
|
||||
chartVals := `
|
||||
name: chart-x
|
||||
where:
|
||||
city: kaunas
|
||||
title: hmm
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/bye-world
|
||||
tag: 1.1.0
|
||||
|
||||
image2:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.2.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: &chart.Chart{
|
||||
Values: &chart.Config{Raw: chartVals},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
},
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
approver, teardown := approver()
|
||||
defer teardown()
|
||||
prov := NewProvider(fakeImpl, &fakeSender{}, approver)
|
||||
|
||||
tracked, _ := prov.TrackedImages()
|
||||
|
||||
if tracked[0].Image.Remote() != "gcr.io/v2-namespace/bye-world:1.1.0" {
|
||||
t.Errorf("unexpected image: %s", tracked[0].Image.Remote())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTrackedReleasesWithoutKeelConfig(t *testing.T) {
|
||||
|
||||
chartVals := `
|
||||
name: chart-x
|
||||
where:
|
||||
city: kaunas
|
||||
title: hmm
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/bye-world
|
||||
tag: 1.1.0
|
||||
|
||||
image2:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.2.0
|
||||
|
||||
`
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: &chart.Chart{
|
||||
Values: &chart.Config{Raw: chartVals},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
},
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
approver, teardown := approver()
|
||||
defer teardown()
|
||||
prov := NewProvider(fakeImpl, &fakeSender{}, approver)
|
||||
|
||||
tracked, _ := prov.TrackedImages()
|
||||
|
||||
if len(tracked) != 0 {
|
||||
t.Errorf("didn't expect to find any tracked releases, found: %d", len(tracked))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetTrackedReleasesTotallyNonStandard(t *testing.T) {
|
||||
|
||||
chartVals := `
|
||||
name: chart-x
|
||||
where:
|
||||
city: kaunas
|
||||
title: hmm
|
||||
ihavemyownstandard:
|
||||
repo: gcr.io/v2-namespace/bye-world
|
||||
version: 1.1.0
|
||||
|
||||
image2:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.2.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: ihavemyownstandard.repo
|
||||
tag: ihavemyownstandard.version
|
||||
|
||||
`
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: &chart.Chart{
|
||||
Values: &chart.Config{Raw: chartVals},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
},
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
approver, teardown := approver()
|
||||
defer teardown()
|
||||
prov := NewProvider(fakeImpl, &fakeSender{}, approver)
|
||||
|
||||
tracked, _ := prov.TrackedImages()
|
||||
|
||||
if tracked[0].Image.Remote() != "gcr.io/v2-namespace/bye-world:1.1.0" {
|
||||
t.Errorf("unexpected image: %s", tracked[0].Image.Remote())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTriggerFromConfig(t *testing.T) {
|
||||
vals, err := testingConfigYaml(&KeelChartConfig{Trigger: types.TriggerTypePoll, Policy: "all"})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load testdata: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get image paths: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Trigger != types.TriggerTypePoll {
|
||||
t.Errorf("invalid trigger: %s", cfg.Trigger)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPolicyFromConfig(t *testing.T) {
|
||||
vals, err := testingConfigYaml(&KeelChartConfig{Policy: "all"})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load testdata: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get image paths: %s", err)
|
||||
}
|
||||
|
||||
// if cfg.Policy != types.PolicyTypeAll {
|
||||
if cfg.Plc.Name() != policy.SemverPolicyTypeAll.String() {
|
||||
t.Errorf("invalid policy: %s", cfg.Policy)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetImagesFromConfig(t *testing.T) {
|
||||
vals, err := testingConfigYaml(&KeelChartConfig{Policy: "all", Images: []ImageDetails{
|
||||
{
|
||||
RepositoryPath: "repopath",
|
||||
TagPath: "tagpath",
|
||||
},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load testdata: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get image paths: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Images[0].RepositoryPath != "repopath" {
|
||||
t.Errorf("invalid repo path: %s", cfg.Images[0].RepositoryPath)
|
||||
}
|
||||
|
||||
if cfg.Images[0].TagPath != "tagpath" {
|
||||
t.Errorf("invalid tag path: %s", cfg.Images[0].TagPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateRelease(t *testing.T) {
|
||||
|
||||
chartVals := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: karolisr/webhook-demo
|
||||
tag: 0.0.10
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
myChart := &chart.Chart{
|
||||
Values: &chart.Config{Raw: chartVals},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: myChart,
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
approver, teardown := approver()
|
||||
defer teardown()
|
||||
provider := NewProvider(fakeImpl, &fakeSender{}, approver)
|
||||
|
||||
err := provider.processEvent(&types.Event{
|
||||
Repository: types.Repository{
|
||||
Name: "karolisr/webhook-demo",
|
||||
Tag: "0.0.11",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("failed to process event, error: %s", err)
|
||||
}
|
||||
|
||||
// checking updated release
|
||||
if fakeImpl.updatedChart != myChart {
|
||||
t.Errorf("wrong chart updated")
|
||||
}
|
||||
|
||||
if fakeImpl.updatedRlsName != "release-1" {
|
||||
t.Errorf("unexpected release updated: %s", fakeImpl.updatedRlsName)
|
||||
}
|
||||
}
|
||||
|
||||
// pollingValues is a minimal chart values document with a poll trigger and a
// custom poll schedule, shared by the polling-schedule tests below.
var pollingValues = `
name: al Rashid
where:
  city: Basrah
  title: caliph
image:
  repository: gcr.io/v2-namespace/hello-world
  tag: 1.1.0

keel:
  policy: all
  trigger: poll
  pollSchedule: "@every 12m"
  images:
    - repository: image.repository
      tag: image.tag

`
|
||||
|
||||
func TestGetPollingSchedule(t *testing.T) {
|
||||
vals, _ := chartutil.ReadValues([]byte(pollingValues))
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get config: %s", err)
|
||||
}
|
||||
|
||||
if cfg.PollSchedule != "@every 12m" {
|
||||
t.Errorf("unexpected polling schedule: %s", cfg.PollSchedule)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getKeelConfig(t *testing.T) {
|
||||
|
||||
var valuesBasicStr = `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
valuesBasic, _ := chartutil.ReadValues([]byte(valuesBasicStr))
|
||||
|
||||
var valuesChannelsStr = `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
notificationChannels:
|
||||
- chan1
|
||||
- chan2
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
valuesChannels, _ := chartutil.ReadValues([]byte(valuesChannelsStr))
|
||||
|
||||
var valuesPollStr = `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: major
|
||||
trigger: poll
|
||||
pollSchedule: "@every 30m"
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
imagePullSecret: such-secret
|
||||
`
|
||||
valuesPoll, _ := chartutil.ReadValues([]byte(valuesPollStr))
|
||||
|
||||
var valuesNoMatchPreReleaseStr = `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
matchPreRelease: false
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
valuesNoMatchPreRelease, _ := chartutil.ReadValues([]byte(valuesNoMatchPreReleaseStr))
|
||||
|
||||
type args struct {
|
||||
vals chartutil.Values
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *KeelChartConfig
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "correct config",
|
||||
args: args{vals: valuesBasic},
|
||||
want: &KeelChartConfig{
|
||||
Policy: "all",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypeDefault,
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository", TagPath: "image.tag"},
|
||||
},
|
||||
Plc: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "custom notification channels",
|
||||
args: args{vals: valuesChannels},
|
||||
want: &KeelChartConfig{
|
||||
Policy: "all",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypeDefault,
|
||||
NotificationChannels: []string{"chan1", "chan2"},
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository", TagPath: "image.tag"},
|
||||
},
|
||||
Plc: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "correct polling config",
|
||||
args: args{vals: valuesPoll},
|
||||
want: &KeelChartConfig{
|
||||
Policy: "major",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
PollSchedule: "@every 30m",
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository", TagPath: "image.tag", ImagePullSecret: "such-secret"},
|
||||
},
|
||||
Plc: policy.NewSemverPolicy(policy.SemverPolicyTypeMajor, true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disable matchPreRelease",
|
||||
args: args{vals: valuesNoMatchPreRelease},
|
||||
want: &KeelChartConfig{
|
||||
Policy: "all",
|
||||
MatchPreRelease: false,
|
||||
Trigger: types.TriggerTypeDefault,
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository", TagPath: "image.tag"},
|
||||
},
|
||||
Plc: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, false),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := getKeelConfig(tt.args.vals)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("getKeelConfig() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("getKeelConfig() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetChartMatchTag(t *testing.T) {
|
||||
|
||||
chartVals := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
matchTag: true
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
|
||||
fakeImpl := &fakeImplementer{
|
||||
listReleasesResponse: &rls.ListReleasesResponse{
|
||||
Releases: []*hapi_release5.Release{
|
||||
{
|
||||
Name: "release-1",
|
||||
Chart: &chart.Chart{
|
||||
Values: &chart.Config{Raw: chartVals},
|
||||
Metadata: &chart.Metadata{Name: "app-x"},
|
||||
},
|
||||
Config: &chart.Config{Raw: ""},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
releases, err := fakeImpl.ListReleases()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
policyFound := false
|
||||
|
||||
for _, release := range releases.Releases {
|
||||
|
||||
vals, err := values(release.Chart, release.Config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get values: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get image paths: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Plc.Name() == policy.SemverPolicyTypeAll.String() {
|
||||
policyFound = true
|
||||
}
|
||||
if !cfg.MatchTag {
|
||||
t.Errorf("expected to find 'matchTag' == true ")
|
||||
}
|
||||
}
|
||||
|
||||
if !policyFound {
|
||||
t.Errorf("policy not found")
|
||||
}
|
||||
}
|
|
@ -1,48 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"k8s.io/helm/pkg/helm"
|
||||
"k8s.io/helm/pkg/proto/hapi/chart"
|
||||
rls "k8s.io/helm/pkg/proto/hapi/services"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TillerAddress is the default in-cluster address of the tiller service,
// used when no explicit address is supplied to NewHelmImplementer.
var (
	TillerAddress = "tiller-deploy:44134"
)
|
||||
|
||||
// Implementer - generic helm implementer used to abstract actual implementation
|
||||
type Implementer interface {
|
||||
ListReleases(opts ...helm.ReleaseListOption) (*rls.ListReleasesResponse, error)
|
||||
UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...helm.UpdateOption) (*rls.UpdateReleaseResponse, error)
|
||||
}
|
||||
|
||||
// HelmImplementer - actual helm implementer
|
||||
type HelmImplementer struct {
|
||||
client helm.Interface
|
||||
}
|
||||
|
||||
// NewHelmImplementer - get new helm implementer
|
||||
func NewHelmImplementer(address string) *HelmImplementer {
|
||||
if address == "" {
|
||||
address = TillerAddress
|
||||
} else {
|
||||
log.Infof("provider.helm: tiller address '%s' supplied", address)
|
||||
}
|
||||
|
||||
return &HelmImplementer{
|
||||
client: helm.NewClient(helm.Host(address)),
|
||||
}
|
||||
}
|
||||
|
||||
// ListReleases - list available releases
|
||||
func (i *HelmImplementer) ListReleases(opts ...helm.ReleaseListOption) (*rls.ListReleasesResponse, error) {
|
||||
return i.client.ListReleases(opts...)
|
||||
}
|
||||
|
||||
// UpdateReleaseFromChart - update release from chart
|
||||
func (i *HelmImplementer) UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...helm.UpdateOption) (*rls.UpdateReleaseResponse, error) {
|
||||
return i.client.UpdateReleaseFromChart(rlsName, chart, opts...)
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestImplementerList(t *testing.T) {
|
||||
t.Skip()
|
||||
|
||||
imp := NewHelmImplementer("192.168.99.100:30083")
|
||||
releases, err := imp.ListReleases()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if releases.Count == 0 {
|
||||
t.Errorf("why no releases? ")
|
||||
}
|
||||
|
||||
}
|
|
@ -1,127 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"github.com/keel-hq/keel/internal/policy"
|
||||
"github.com/keel-hq/keel/types"
|
||||
"github.com/keel-hq/keel/util/image"
|
||||
|
||||
hapi_chart "k8s.io/helm/pkg/proto/hapi/chart"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func checkRelease(repo *types.Repository, namespace, name string, chart *hapi_chart.Chart, config *hapi_chart.Config) (plan *UpdatePlan, shouldUpdateRelease bool, err error) {
|
||||
|
||||
plan = &UpdatePlan{
|
||||
Chart: chart,
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
Values: make(map[string]string),
|
||||
}
|
||||
|
||||
eventRepoRef, err := image.Parse(repo.String())
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"repository_name": repo.Name,
|
||||
}).Error("provider.helm: failed to parse event repository name")
|
||||
return
|
||||
}
|
||||
|
||||
// getting configuration
|
||||
vals, err := values(chart, config)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
}).Error("provider.helm: failed to get values.yaml for release")
|
||||
return
|
||||
}
|
||||
|
||||
keelCfg, err := getKeelConfig(vals)
|
||||
if err != nil {
|
||||
if err == ErrPolicyNotSpecified {
|
||||
// nothing to do
|
||||
return plan, false, nil
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
}).Error("provider.helm: failed to get keel configuration for release")
|
||||
// ignoring this release, no keel config found
|
||||
return plan, false, nil
|
||||
}
|
||||
log.Infof("policy for release %s/%s parsed: %s", namespace, name, keelCfg.Plc.Name())
|
||||
|
||||
if keelCfg.Plc.Type() == policy.PolicyTypeNone {
|
||||
// policy is not set, ignoring release
|
||||
return plan, false, nil
|
||||
}
|
||||
|
||||
// checking for impacted images
|
||||
for _, imageDetails := range keelCfg.Images {
|
||||
imageRef, err := parseImage(vals, &imageDetails)
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"repository_name": imageDetails.RepositoryPath,
|
||||
"repository_tag": imageDetails.TagPath,
|
||||
}).Error("provider.helm: failed to parse image")
|
||||
continue
|
||||
}
|
||||
|
||||
if imageRef.Repository() != eventRepoRef.Repository() {
|
||||
log.WithFields(log.Fields{
|
||||
"parsed_image_name": imageRef.Remote(),
|
||||
"target_image_name": repo.Name,
|
||||
}).Debug("provider.helm: images do not match, ignoring")
|
||||
continue
|
||||
}
|
||||
|
||||
shouldUpdate, err := keelCfg.Plc.ShouldUpdate(imageRef.Tag(), eventRepoRef.Tag())
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"error": err,
|
||||
"repository_name": imageDetails.RepositoryPath,
|
||||
"repository_tag": imageDetails.TagPath,
|
||||
}).Error("provider.helm: got error while checking whether update the chart")
|
||||
continue
|
||||
}
|
||||
|
||||
if !shouldUpdate {
|
||||
log.WithFields(log.Fields{
|
||||
"parsed_image_name": imageRef.Remote(),
|
||||
"target_image_name": repo.Name,
|
||||
"policy": keelCfg.Plc.Name(),
|
||||
}).Info("provider.helm: ignoring")
|
||||
continue
|
||||
}
|
||||
|
||||
// if keelCfg.MatchTag && imageRef.Tag() != eventRepoRef.Tag() {
|
||||
// log.WithFields(log.Fields{
|
||||
// "parsed_image_name": imageRef.Remote(),
|
||||
// "target_image_name": repo.Name,
|
||||
// "policy": keelCfg.Policy.String(),
|
||||
// }).Info("provider.helm: match tag set but tags do not match, ignoring")
|
||||
// continue
|
||||
// }
|
||||
|
||||
if imageDetails.DigestPath != "" {
|
||||
plan.Values[imageDetails.DigestPath] = repo.Digest
|
||||
log.WithFields(log.Fields{
|
||||
"image_details_digestPath": imageDetails.DigestPath,
|
||||
"target_image_digest": repo.Digest,
|
||||
}).Debug("provider.helm: setting image Digest")
|
||||
}
|
||||
|
||||
path, value := getUnversionedPlanValues(repo.Tag, imageRef, &imageDetails)
|
||||
plan.Values[path] = value
|
||||
plan.NewVersion = repo.Tag
|
||||
plan.CurrentVersion = imageRef.Tag()
|
||||
plan.Config = keelCfg
|
||||
shouldUpdateRelease = true
|
||||
if imageDetails.ReleaseNotes != "" {
|
||||
plan.ReleaseNotes = append(plan.ReleaseNotes, imageDetails.ReleaseNotes)
|
||||
}
|
||||
}
|
||||
|
||||
return plan, shouldUpdateRelease, nil
|
||||
}
|
|
@ -1,474 +0,0 @@
|
|||
package helm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/keel-hq/keel/internal/policy"
|
||||
"github.com/keel-hq/keel/types"
|
||||
hapi_chart "k8s.io/helm/pkg/proto/hapi/chart"
|
||||
)
|
||||
|
||||
func Test_checkUnversionedRelease(t *testing.T) {
|
||||
chartValuesPolicyForce := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: force
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
chartValuesPolicyForceReleaseNotes := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: force
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
releaseNotes: https://github.com/keel-hq/keel/releases
|
||||
|
||||
`
|
||||
|
||||
chartValuesPolicyMajor := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: major
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
|
||||
helloWorldChart := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesPolicyForce},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
helloWorldChartPolicyMajor := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesPolicyMajor},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
helloWorldChartPolicyMajorReleaseNotes := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesPolicyForceReleaseNotes},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
type args struct {
|
||||
repo *types.Repository
|
||||
namespace string
|
||||
name string
|
||||
chart *hapi_chart.Chart
|
||||
config *hapi_chart.Config
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantPlan *UpdatePlan
|
||||
wantShouldUpdateRelease bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "correct force update",
|
||||
args: args{
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{
|
||||
Namespace: "default",
|
||||
Name: "release-1",
|
||||
Chart: helloWorldChart,
|
||||
Values: map[string]string{"image.tag": "latest"},
|
||||
CurrentVersion: "1.1.0",
|
||||
NewVersion: "latest",
|
||||
Config: &KeelChartConfig{
|
||||
Policy: "force",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Images: []ImageDetails{
|
||||
{
|
||||
RepositoryPath: "image.repository",
|
||||
TagPath: "image.tag",
|
||||
},
|
||||
},
|
||||
Plc: policy.NewForcePolicy(false),
|
||||
},
|
||||
},
|
||||
wantShouldUpdateRelease: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "correct force update, with release notes",
|
||||
args: args{
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.2.0"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldChartPolicyMajorReleaseNotes,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{
|
||||
Namespace: "default",
|
||||
Name: "release-1",
|
||||
Chart: helloWorldChartPolicyMajorReleaseNotes,
|
||||
Values: map[string]string{"image.tag": "1.2.0"},
|
||||
CurrentVersion: "1.1.0",
|
||||
NewVersion: "1.2.0",
|
||||
ReleaseNotes: []string{"https://github.com/keel-hq/keel/releases"},
|
||||
Config: &KeelChartConfig{
|
||||
Policy: "force",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Images: []ImageDetails{
|
||||
{
|
||||
RepositoryPath: "image.repository",
|
||||
TagPath: "image.tag",
|
||||
ReleaseNotes: "https://github.com/keel-hq/keel/releases",
|
||||
},
|
||||
},
|
||||
Plc: policy.NewForcePolicy(false),
|
||||
},
|
||||
},
|
||||
wantShouldUpdateRelease: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "update without force",
|
||||
args: args{
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldChartPolicyMajor,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{
|
||||
Namespace: "default",
|
||||
Name: "release-1",
|
||||
Chart: helloWorldChartPolicyMajor,
|
||||
Values: map[string]string{}},
|
||||
wantShouldUpdateRelease: false,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotPlan, gotShouldUpdateRelease, err := checkRelease(tt.args.repo, tt.args.namespace, tt.args.name, tt.args.chart, tt.args.config)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("checkRelease() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotPlan, tt.wantPlan) {
|
||||
t.Errorf("checkRelease() gotPlan = %v, want %v", gotPlan, tt.wantPlan)
|
||||
}
|
||||
if gotShouldUpdateRelease != tt.wantShouldUpdateRelease {
|
||||
t.Errorf("checkRelease() gotShouldUpdateRelease = %v, want %v", gotShouldUpdateRelease, tt.wantShouldUpdateRelease)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_checkRelease(t *testing.T) {
|
||||
|
||||
chartValuesA := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.1.0
|
||||
|
||||
keel:
|
||||
policy: all
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
// non semver existing
|
||||
chartValuesB := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: alpha
|
||||
|
||||
keel:
|
||||
policy: force
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
|
||||
`
|
||||
chartValuesNonSemverNoForce := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: alpha
|
||||
|
||||
keel:
|
||||
policy: major
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
tag: image.tag
|
||||
`
|
||||
|
||||
chartValuesNoTag := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world:1.0.0
|
||||
|
||||
keel:
|
||||
policy: major
|
||||
trigger: poll
|
||||
images:
|
||||
- repository: image.repository
|
||||
`
|
||||
|
||||
chartValuesNoKeelCfg := `
|
||||
name: al Rashid
|
||||
where:
|
||||
city: Basrah
|
||||
title: caliph
|
||||
image:
|
||||
repository: gcr.io/v2-namespace/hello-world
|
||||
tag: 1.0.0
|
||||
`
|
||||
|
||||
helloWorldChart := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesA},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
helloWorldNonSemverChart := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesB},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
helloWorldNonSemverNoForceChart := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesNonSemverNoForce},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
helloWorldNoTagChart := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesNoTag},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
helloWorldNoKeelCfg := &hapi_chart.Chart{
|
||||
Values: &hapi_chart.Config{Raw: chartValuesNoKeelCfg},
|
||||
Metadata: &hapi_chart.Metadata{Name: "app-x"},
|
||||
}
|
||||
|
||||
type args struct {
|
||||
repo *types.Repository
|
||||
namespace string
|
||||
name string
|
||||
chart *hapi_chart.Chart
|
||||
config *hapi_chart.Config
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantPlan *UpdatePlan
|
||||
wantShouldUpdateRelease bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "correct version bump",
|
||||
args: args{
|
||||
// newVersion: unsafeGetVersion("1.1.2"),
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{
|
||||
Namespace: "default",
|
||||
Name: "release-1",
|
||||
Chart: helloWorldChart,
|
||||
Values: map[string]string{"image.tag": "1.1.2"},
|
||||
NewVersion: "1.1.2",
|
||||
CurrentVersion: "1.1.0",
|
||||
Config: &KeelChartConfig{
|
||||
Policy: "all",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository", TagPath: "image.tag"},
|
||||
},
|
||||
Plc: policy.NewSemverPolicy(policy.SemverPolicyTypeAll, true),
|
||||
},
|
||||
},
|
||||
wantShouldUpdateRelease: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "correct but same version",
|
||||
args: args{
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.0"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{Namespace: "default", Name: "release-1", Chart: helloWorldChart, Values: map[string]string{}},
|
||||
wantShouldUpdateRelease: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "different image",
|
||||
args: args{
|
||||
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/bye-world", Tag: "1.1.5"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{Namespace: "default", Name: "release-1", Chart: helloWorldChart, Values: map[string]string{}},
|
||||
wantShouldUpdateRelease: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non semver existing version",
|
||||
args: args{
|
||||
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.0"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldNonSemverChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{
|
||||
Namespace: "default",
|
||||
Name: "release-1",
|
||||
Chart: helloWorldNonSemverChart,
|
||||
Values: map[string]string{"image.tag": "1.1.0"},
|
||||
NewVersion: "1.1.0",
|
||||
CurrentVersion: "alpha",
|
||||
Config: &KeelChartConfig{
|
||||
Policy: "force",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository", TagPath: "image.tag"},
|
||||
},
|
||||
Plc: policy.NewForcePolicy(false),
|
||||
},
|
||||
},
|
||||
wantShouldUpdateRelease: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non semver no force, should not add to plan",
|
||||
args: args{
|
||||
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.0"},
|
||||
namespace: "default",
|
||||
name: "release-1",
|
||||
chart: helloWorldNonSemverNoForceChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{Namespace: "default", Name: "release-1", Chart: helloWorldNonSemverNoForceChart, Values: map[string]string{}},
|
||||
wantShouldUpdateRelease: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "semver no tag",
|
||||
args: args{
|
||||
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.0"},
|
||||
namespace: "default",
|
||||
name: "release-1-no-tag",
|
||||
chart: helloWorldNoTagChart,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{
|
||||
Namespace: "default",
|
||||
Name: "release-1-no-tag",
|
||||
Chart: helloWorldNoTagChart,
|
||||
Values: map[string]string{"image.repository": "gcr.io/v2-namespace/hello-world:1.1.0"},
|
||||
NewVersion: "1.1.0",
|
||||
CurrentVersion: "1.0.0",
|
||||
Config: &KeelChartConfig{
|
||||
Policy: "major",
|
||||
MatchPreRelease: true,
|
||||
Trigger: types.TriggerTypePoll,
|
||||
Images: []ImageDetails{
|
||||
{RepositoryPath: "image.repository"},
|
||||
},
|
||||
Plc: policy.NewSemverPolicy(policy.SemverPolicyTypeMajor, true),
|
||||
},
|
||||
},
|
||||
wantShouldUpdateRelease: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no keel config",
|
||||
args: args{
|
||||
|
||||
repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.0"},
|
||||
namespace: "default",
|
||||
name: "release-1-no-tag",
|
||||
chart: helloWorldNoKeelCfg,
|
||||
config: &hapi_chart.Config{Raw: ""},
|
||||
},
|
||||
wantPlan: &UpdatePlan{Namespace: "default", Name: "release-1-no-tag", Chart: helloWorldNoKeelCfg, Values: map[string]string{}},
|
||||
wantShouldUpdateRelease: false,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotPlan, gotShouldUpdateRelease, err := checkRelease(tt.args.repo, tt.args.namespace, tt.args.name, tt.args.chart, tt.args.config)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("checkRelease() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotPlan, tt.wantPlan) {
|
||||
t.Errorf("checkRelease() gotPlan = %v, want %v", gotPlan, tt.wantPlan)
|
||||
}
|
||||
if gotShouldUpdateRelease != tt.wantShouldUpdateRelease {
|
||||
t.Errorf("checkRelease() gotShouldUpdateRelease = %v, want %v", gotShouldUpdateRelease, tt.wantShouldUpdateRelease)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,12 +1,13 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/keel-hq/keel/internal/k8s"
|
||||
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
v1beta1 "k8s.io/api/batch/v1beta1"
|
||||
batch_v1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
@ -95,19 +96,19 @@ func (i *KubernetesImplementer) Config() *rest.Config {
|
|||
// Namespaces - get all namespaces
|
||||
func (i *KubernetesImplementer) Namespaces() (*v1.NamespaceList, error) {
|
||||
namespaces := i.client.CoreV1().Namespaces()
|
||||
return namespaces.List(meta_v1.ListOptions{})
|
||||
return namespaces.List(context.TODO(), meta_v1.ListOptions{})
|
||||
}
|
||||
|
||||
// Deployment - get specific deployment for namespace/name
|
||||
func (i *KubernetesImplementer) Deployment(namespace, name string) (*apps_v1.Deployment, error) {
|
||||
dep := i.client.AppsV1().Deployments(namespace)
|
||||
return dep.Get(name, meta_v1.GetOptions{})
|
||||
return dep.Get(context.TODO(), name, meta_v1.GetOptions{})
|
||||
}
|
||||
|
||||
// Deployments - get all deployments for namespace
|
||||
func (i *KubernetesImplementer) Deployments(namespace string) (*apps_v1.DeploymentList, error) {
|
||||
dep := i.client.AppsV1().Deployments(namespace)
|
||||
l, err := dep.List(meta_v1.ListOptions{})
|
||||
l, err := dep.List(context.TODO(), meta_v1.ListOptions{})
|
||||
return l, err
|
||||
}
|
||||
|
||||
|
@ -123,22 +124,22 @@ func (i *KubernetesImplementer) Update(obj *k8s.GenericResource) error {
|
|||
|
||||
switch resource := obj.GetResource().(type) {
|
||||
case *apps_v1.Deployment:
|
||||
_, err := i.client.AppsV1().Deployments(resource.Namespace).Update(resource)
|
||||
_, err := i.client.AppsV1().Deployments(resource.Namespace).Update(context.TODO(), resource, meta_v1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case *apps_v1.StatefulSet:
|
||||
_, err := i.client.AppsV1().StatefulSets(resource.Namespace).Update(resource)
|
||||
_, err := i.client.AppsV1().StatefulSets(resource.Namespace).Update(context.TODO(), resource, meta_v1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case *apps_v1.DaemonSet:
|
||||
_, err := i.client.AppsV1().DaemonSets(resource.Namespace).Update(resource)
|
||||
_, err := i.client.AppsV1().DaemonSets(resource.Namespace).Update(context.TODO(), resource, meta_v1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case *v1beta1.CronJob:
|
||||
_, err := i.client.BatchV1beta1().CronJobs(resource.Namespace).Update(resource)
|
||||
case *batch_v1.CronJob:
|
||||
_, err := i.client.BatchV1().CronJobs(resource.Namespace).Update(context.TODO(), resource, meta_v1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -150,17 +151,17 @@ func (i *KubernetesImplementer) Update(obj *k8s.GenericResource) error {
|
|||
|
||||
// Secret - get secret
|
||||
func (i *KubernetesImplementer) Secret(namespace, name string) (*v1.Secret, error) {
|
||||
return i.client.CoreV1().Secrets(namespace).Get(name, meta_v1.GetOptions{})
|
||||
return i.client.CoreV1().Secrets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
|
||||
}
|
||||
|
||||
// Pods - get pods
|
||||
func (i *KubernetesImplementer) Pods(namespace, labelSelector string) (*v1.PodList, error) {
|
||||
return i.client.CoreV1().Pods(namespace).List(meta_v1.ListOptions{LabelSelector: labelSelector})
|
||||
return i.client.CoreV1().Pods(namespace).List(context.TODO(), meta_v1.ListOptions{LabelSelector: labelSelector})
|
||||
}
|
||||
|
||||
// DeletePod - delete pod by name
|
||||
func (i *KubernetesImplementer) DeletePod(namespace, name string, opts *meta_v1.DeleteOptions) error {
|
||||
return i.client.CoreV1().Pods(namespace).Delete(name, opts)
|
||||
return i.client.CoreV1().Pods(namespace).Delete(context.TODO(), name, *opts)
|
||||
}
|
||||
|
||||
// ConfigMaps - returns an interface to config maps for a specified namespace
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/keel-hq/keel/provider/helm"
|
||||
"github.com/keel-hq/keel/provider/kubernetes"
|
||||
"github.com/keel-hq/keel/types"
|
||||
|
||||
|
@ -65,19 +64,6 @@ func (g *DefaultGetter) Get(image *types.TrackedImage) (*types.Credentials, erro
|
|||
return creds, nil
|
||||
}
|
||||
|
||||
switch image.Provider {
|
||||
case helm.ProviderName:
|
||||
if len(image.Secrets) == 0 {
|
||||
// looking up secrets based on selector
|
||||
secrets, err := g.lookupSecrets(image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// populating secrets
|
||||
image.Secrets = secrets
|
||||
}
|
||||
}
|
||||
if len(image.Secrets) == 0 {
|
||||
return nil, ErrSecretsNotSpecified
|
||||
}
|
||||
|
|
|
@ -5,7 +5,6 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/keel-hq/keel/provider/helm"
|
||||
"github.com/keel-hq/keel/types"
|
||||
"github.com/keel-hq/keel/util/image"
|
||||
testutil "github.com/keel-hq/keel/util/testing"
|
||||
|
@ -305,62 +304,6 @@ func TestLookupHelmEncodedSecret(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetDirectHelmSecret(t *testing.T) {
|
||||
imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11")
|
||||
|
||||
impl := &testutil.FakeK8sImplementer{
|
||||
AvailablePods: &v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Spec: v1.PodSpec{
|
||||
ImagePullSecrets: []v1.LocalObjectReference{
|
||||
{
|
||||
Name: "very-secret-dont-look",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
AvailableSecret: map[string]*v1.Secret{
|
||||
"myregistrysecret": {
|
||||
Data: map[string][]byte{
|
||||
dockerConfigKey: []byte(secretDataPayload2),
|
||||
},
|
||||
Type: v1.SecretTypeDockercfg,
|
||||
},
|
||||
"very-secret-dont-look": {
|
||||
Data: map[string][]byte{
|
||||
dockerConfigKey: []byte(secretDataPayload),
|
||||
},
|
||||
Type: v1.SecretTypeDockercfg,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
getter := NewGetter(impl, nil)
|
||||
|
||||
trackedImage := &types.TrackedImage{
|
||||
Image: imgRef,
|
||||
Namespace: "default",
|
||||
Secrets: []string{"myregistrysecret"},
|
||||
Provider: helm.ProviderName,
|
||||
}
|
||||
|
||||
creds, err := getter.Get(trackedImage)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get creds: %s", err)
|
||||
}
|
||||
|
||||
if creds.Username != "foo-user-x-2" {
|
||||
t.Errorf("unexpected username: %s", creds.Username)
|
||||
}
|
||||
|
||||
if creds.Password != "bar-pass-x-2" {
|
||||
t.Errorf("unexpected pass: %s", creds.Password)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupHelmNoSecretsFound(t *testing.T) {
|
||||
imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11")
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/keel-hq/keel/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
|
|
Loading…
Reference in New Issue