Add integration tests for node labels

pull/6717/head
Medya Gh 2020-02-20 13:57:09 -08:00
parent b62c12b7f2
commit c85fe4955d
2 changed files with 42 additions and 13 deletions

@@ -491,26 +491,21 @@ func (k *Bootstrapper) applyKicOverlay(cfg config.MachineConfig) error {
 // applyNodeLabels applies minikube labels to all the nodes
 func (k *Bootstrapper) applyNodeLabels(cfg config.MachineConfig) error {
-	start := time.Now()
-	// based on ISO 8601 (RFC 3339) except converting - and : to _ because of kubernetes label restriction
-	createdAtLbl := "k8s.minikube.io/updated_at=" + start.Format("2006_01_02T15_04_05_0700")
-	verLbl := "k8s.minikube.io/version=" + version.GetVersion()
-	commitLbl := "k8s.minikube.io/commit=" + version.GetGitCommitID()
-	nameLbl := "k8s.minikube.io/name=" + cfg.Name
+	// time the cluster was created. the time format is based on ISO 8601 (RFC 3339),
+	// converting - and : to _ because of the kubernetes label value restriction
+	createdAtLbl := "minikube.k8s.io/updated_at=" + time.Now().Format("2006_01_02T15_04_05_0700")
+	verLbl := "minikube.k8s.io/version=" + version.GetVersion()
+	commitLbl := "minikube.k8s.io/commit=" + version.GetGitCommitID()
+	nameLbl := "minikube.k8s.io/name=" + cfg.Name
 	// example:
-	// /var/lib/minikube/binaries/v1.17.3/kubectl label nodes --kubeconfig /var/lib/minikube/kubeconfig k8s.minikube.io/version=1.7.3 --all --overwrite
+	// sudo /var/lib/minikube/binaries/v1.17.3/kubectl label nodes minikube.k8s.io/version=v1.7.3 minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
 	cmd := exec.Command("sudo",
 		path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"),
 		"label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, "--all", "--overwrite",
 		fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")))
-	rr, err := k.c.RunCmd(cmd)
-	elapsed := time.Since(start)
-	if elapsed > (1 * time.Second) {
-		glog.Infof("Completed: %s: (%s)", rr.Command(), elapsed)
-	}
-	if err != nil {
+	if _, err := k.c.RunCmd(cmd); err != nil {
 		return errors.Wrapf(err, "applying node labels")
 	}
 	return nil
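
A note on the new timestamp label: Kubernetes label values may only contain alphanumerics, '-', '_' and '.', so the RFC 3339 separators '-' and ':' are rewritten as '_' through Go's reference-time layout. A minimal standalone sketch of the formatting, illustrative only and not part of this commit:

package main

import (
	"fmt"
	"time"
)

func main() {
	// reference-time layout with '-' and ':' replaced by '_',
	// producing a label-safe value such as 2020_02_20T12_05_35_0700
	createdAt := time.Now().Format("2006_01_02T15_04_05_0700")
	fmt.Println("minikube.k8s.io/updated_at=" + createdAt)
}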

@@ -112,6 +112,7 @@ func TestFunctional(t *testing.T) {
 		{"FileSync", validateFileSync},
 		{"UpdateContextCmd", validateUpdateContextCmd},
 		{"DockerEnv", validateDockerEnv},
+		{"NodeLabels", validateNodeLabels},
 	}
 	for _, tc := range tests {
 		tc := tc
@@ -123,6 +124,39 @@ func TestFunctional(t *testing.T) {
 		})
 	}
+// validateNodeLabels checks that the minikube cluster is created with the correct kubernetes node labels
+func validateNodeLabels(ctx context.Context, t *testing.T, profile string) {
+	mctx, cancel := context.WithTimeout(ctx, 13*time.Second)
+	defer cancel()
+	rr, err := Run(t, exec.CommandContext(mctx, "kubectl", "--context", profile, "get", "nodes", "--output", "jsonpath={.items[0].metadata.labels}"))
+	if err != nil {
+		t.Errorf("%s failed: %v", rr.Args, err)
+	}
+	// the output is not valid JSON; it would look like this:
+	// [beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux minikube.k8s.io/commit:aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name:p1 minikube.k8s.io/updated_at:2020_02_20T12_05_35_0700 minikube.k8s.io/version:v1.7.3 kubernetes.io/arch:amd64 kubernetes.io/hostname:p1 kubernetes.io/os:linux node-role.kubernetes.io/master:]
+	// so check for the expected label keys as substrings instead of unmarshaling
+	expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"}
+	for _, el := range expectedLabels {
+		if !strings.Contains(rr.Stdout.String(), el) {
+			t.Errorf("expected to have label %q in node labels: %s", el, rr.Stdout.String())
+		}
+	}
+}
 // check functionality of minikube after evaling docker-env
 func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
 	mctx, cancel := context.WithTimeout(ctx, 13*time.Second)
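
A closing note on the label check: the substring match keeps the test simple and order-independent. If exact key/value assertions were ever wanted, the map-style jsonpath output could be parsed first. A rough sketch under that assumption; parseLabelMap is a hypothetical helper, not part of this commit, and relies only on the strings package the test file already imports:

// parseLabelMap splits kubectl's map-style jsonpath output, e.g.
// "map[kubernetes.io/os:linux minikube.k8s.io/name:p1]", into a Go map.
// Hypothetical helper, not part of this commit. Splitting on spaces is
// safe because kubernetes label keys and values cannot contain whitespace.
func parseLabelMap(out string) map[string]string {
	out = strings.TrimSpace(out)
	out = strings.TrimPrefix(out, "map")
	out = strings.TrimPrefix(out, "[")
	out = strings.TrimSuffix(out, "]")
	labels := map[string]string{}
	for _, kv := range strings.Fields(out) {
		parts := strings.SplitN(kv, ":", 2)
		if len(parts) == 2 {
			labels[parts[0]] = parts[1]
		}
	}
	return labels
}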