Merge pull request #16416 from marcellmartini/feature/issue-16415
Feature: Make kubeadm.applyNodeLabels apply label to all nodes
commit d8422bf5d7

@@ -35,6 +35,7 @@ type LogOptions struct {

 // Bootstrapper contains all the methods needed to bootstrap a Kubernetes cluster
 type Bootstrapper interface {
+	ApplyNodeLabels(config.ClusterConfig) error
 	StartCluster(config.ClusterConfig) error
 	UpdateCluster(config.ClusterConfig) error
 	DeleteCluster(config.KubernetesConfig) error
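
Exporting ApplyNodeLabels on the Bootstrapper interface is what lets the node-join path (see the joinCluster hunk below) re-apply labels through the control-plane bootstrapper whenever a node is added, instead of labels being applied only once from kubeadm.StartCluster.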

@@ -1040,8 +1040,11 @@ func kubectlPath(cfg config.ClusterConfig) string {
 	return path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl")
 }

+func (k *Bootstrapper) ApplyNodeLabels(cfg config.ClusterConfig) error {
+	return k.applyNodeLabels(cfg)
+}
+
 // applyNodeLabels applies minikube labels to all the nodes
-// but it's currently called only from kubeadm.StartCluster (via kubeadm.init) where there's only one - first node
 func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
 	// time cluster was created. time format is based on ISO 8601 (RFC 3339)
 	// converting - and : to _ because of Kubernetes label restriction

@@ -1053,8 +1056,12 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
 	// ensure that "primary" label is applied only to the 1st node in the cluster (used eg for placing ingress there)
 	// this is used to uniquely distinguish that from other nodes in multi-master/multi-control-plane cluster config
 	primaryLbl := "minikube.k8s.io/primary=false"
+
+	// ensure that the "primary" label is not removed when applying labels to all other nodes
+	applyToNodes := "-l minikube.k8s.io/primary!=true"
 	if len(cfg.Nodes) <= 1 {
 		primaryLbl = "minikube.k8s.io/primary=true"
+		applyToNodes = "--all"
 	}

 	ctx, cancel := context.WithTimeout(context.Background(), applyTimeoutSeconds*time.Second)
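
The branch above reduces to a pure function of the node count. A minimal standalone sketch (hypothetical code, not part of minikube) that mirrors the logic:

package main

import "fmt"

// labelArgs mirrors the branch above: a single-node cluster labels every node
// (including primary=true), while a multi-node cluster relabels only nodes
// matching minikube.k8s.io/primary!=true, so the first node keeps primary=true.
func labelArgs(nodeCount int) (primaryLbl, applyToNodes string) {
	primaryLbl = "minikube.k8s.io/primary=false"
	applyToNodes = "-l minikube.k8s.io/primary!=true"
	if nodeCount <= 1 {
		primaryLbl = "minikube.k8s.io/primary=true"
		applyToNodes = "--all"
	}
	return primaryLbl, applyToNodes
}

func main() {
	fmt.Println(labelArgs(1)) // minikube.k8s.io/primary=true --all
	fmt.Println(labelArgs(3)) // minikube.k8s.io/primary=false -l minikube.k8s.io/primary!=true
}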

@@ -1062,7 +1069,7 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
 	// example:
 	// sudo /var/lib/minikube/binaries/<version>/kubectl label nodes minikube.k8s.io/version=<version> minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
 	cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg),
-		"label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, "--all", "--overwrite",
+		"label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, applyToNodes, "--overwrite",
 		fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")))

 	if _, err := k.c.RunCmd(cmd); err != nil {
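
For a multi-node cluster the resulting command therefore ends with the label selector instead of --all, e.g. (illustrative, abbreviated, following the in-code example above): sudo /var/lib/minikube/binaries/<version>/kubectl label nodes minikube.k8s.io/version=<version> minikube.k8s.io/primary=false -l minikube.k8s.io/primary!=true --overwrite --kubeconfig=/var/lib/minikube/kubeconfig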

@@ -342,6 +342,9 @@ func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrappe
 		return fmt.Errorf("error joining worker node to cluster: %w", err)
 	}

+	if err := cpBs.ApplyNodeLabels(*starter.Cfg); err != nil {
+		return fmt.Errorf("error applying node label: %w", err)
+	}
 	return nil
 }

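Note that joinCluster applies the labels via cpBs, the control-plane bootstrapper, so every worker receives its labels (including minikube.k8s.io/primary=false) as soon as it joins, rather than only at initial cluster start.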

@@ -220,7 +220,7 @@ func validateNodeLabels(ctx context.Context, t *testing.T, profile string) {
 		t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
 	}
 	// docs: check if the node labels match the expected Minikube labels: `minikube.k8s.io/*`
-	expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"}
+	expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"}
 	for _, el := range expectedLabels {
 		if !strings.Contains(rr.Output(), el) {
 			t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output())

@@ -55,6 +55,7 @@ func TestMultiNode(t *testing.T) {
 			{"DeployApp2Nodes", validateDeployAppToMultiNode},
 			{"PingHostFrom2Pods", validatePodsPingHost},
 			{"AddNode", validateAddNodeToMultiNode},
+			{"MultiNodeLabels", validateMultiNodeLabels},
 			{"ProfileList", validateProfileListWithMultiNode},
 			{"CopyFile", validateCopyFileWithMultiNode},
 			{"StopNode", validateStopRunningNode},

@@ -204,6 +205,33 @@ func validateCopyFileWithMultiNode(ctx context.Context, t *testing.T, profile st
 	}
 }

+// validateMultiNodeLabels checks if all node labels were configured correctly
+func validateMultiNodeLabels(ctx context.Context, t *testing.T, profile string) {
+	// docs: Get the node labels from the cluster with `kubectl get nodes`
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "-o", "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"))
+	if err != nil {
+		t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
+	}
+
+	nodeLabelsList := []map[string]string{}
+	fixedString := strings.Replace(rr.Stdout.String(), ",]", "]", 1)
+	err = json.Unmarshal([]byte(fixedString), &nodeLabelsList)
+	if err != nil {
+		t.Errorf("failed to decode json from label list: args %q: %v", rr.Command(), err)
+	}
+
+	// docs: check if all node labels match the expected Minikube labels: `minikube.k8s.io/*`
+	expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"}
+
+	for _, nodeLabels := range nodeLabelsList {
+		for _, el := range expectedLabels {
+			if _, ok := nodeLabels[el]; !ok {
+				t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output())
+			}
+		}
+	}
+}
+
 // validateStopRunningNode tests the minikube node stop command
 func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
 	// Run minikube node stop on that node
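
The strings.Replace call in the test above compensates for the jsonpath template emitting a trailing comma after the last item, which encoding/json rejects. A minimal standalone sketch (with an assumed output shape) demonstrating the fix:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// raw mimics the kubectl jsonpath output: a trailing comma before "]"
	raw := `[{"minikube.k8s.io/name":"m1"},{"minikube.k8s.io/name":"m2"},]`
	// drop the trailing comma once, exactly as the test does
	fixed := strings.Replace(raw, ",]", "]", 1)
	var labels []map[string]string
	if err := json.Unmarshal([]byte(fixed), &labels); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(labels) // [map[minikube.k8s.io/name:m1] map[minikube.k8s.io/name:m2]]
}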