mirror of https://github.com/k3s-io/k3s.git
Consolidate test utility functions into top level package (#11711)

* [e2e] Convert RunCmdOnNode to a method
* Consolidate e2e variables into a TestConfig struct
* Consolidate docker and integration test helper functions
* E2E: Directly count daemonsets, not their pods
* Add missing Context levels for E2E tests
* Migrate e2e.ParsePods to the new tests client package
* Run the go test compile step on the respective architectures

Signed-off-by: Derek Nola <derek.nola@suse.com>
parent a2f6657f48
commit b64b9153ed
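The first bullet above turns RunCmdOnNode into a method reached through the new TestConfig struct. A minimal sketch of the new call pattern, assuming a provisioned *e2e.TestConfig named tc; the helper function and the version command here are illustrative, not part of the commit:

package example

import "github.com/k3s-io/k3s/tests/e2e"

// k3sVersion shows the method-style helper used throughout the diff below:
// commands now run against a node held on the TestConfig struct rather than
// through the old e2e.RunCmdOnNode(cmd, nodeName) free function.
func k3sVersion(tc *e2e.TestConfig) (string, error) {
	// tc.Servers[0] is the first provisioned server node.
	return tc.Servers[0].RunCmdOnNode("k3s --version")
}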
@@ -108,10 +108,10 @@ jobs:
build-go-tests:
name: "Build Go Tests"
runs-on: ubuntu-latest
strategy:
matrix:
arch: [amd64, arm64]
runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }}
outputs:
channel: ${{ steps.channel_step.outputs.channel }}
steps:

@@ -122,7 +122,7 @@ jobs:
- name: Build Go Tests
run: |
mkdir -p ./dist/artifacts
GOOS=linux GOARCH=${{ matrix.arch }} go test -c -ldflags="-w -s" -o ./dist/artifacts ./tests/docker/...
go test -c -ldflags="-w -s" -o ./dist/artifacts ./tests/docker/...
- name: Upload Go Tests
uses: actions/upload-artifact@v4
with:
@@ -0,0 +1,149 @@
package tests

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/utils/set"
)

// This file consolidates functions that are used across multiple testing frameworks.
// Most of it relates to interacting with the Kubernetes API and checking the status of resources.

// CheckDefaultDeployments checks if the standard array of K3s deployments are ready, otherwise returns an error
func CheckDefaultDeployments(kubeconfigFile string) error {
    return CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, kubeconfigFile)
}

// CheckDeployments checks if the provided list of deployments are ready, otherwise returns an error
func CheckDeployments(deployments []string, kubeconfigFile string) error {

    deploymentSet := make(map[string]bool)
    for _, d := range deployments {
        deploymentSet[d] = false
    }

    client, err := K8sClient(kubeconfigFile)
    if err != nil {
        return err
    }
    deploymentList, err := client.AppsV1().Deployments("").List(context.Background(), metav1.ListOptions{})
    if err != nil {
        return err
    }
    for _, deployment := range deploymentList.Items {
        if _, ok := deploymentSet[deployment.Name]; ok && deployment.Status.ReadyReplicas == deployment.Status.Replicas {
            deploymentSet[deployment.Name] = true
        }
    }
    for d, found := range deploymentSet {
        if !found {
            return fmt.Errorf("failed to deploy %s", d)
        }
    }

    return nil
}

func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) {
    clientSet, err := K8sClient(kubeconfigFile)
    if err != nil {
        return nil, err
    }
    nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    return nodes.Items, nil
}

func ParsePods(kubeconfigFile string) ([]corev1.Pod, error) {
    clientSet, err := K8sClient(kubeconfigFile)
    if err != nil {
        return nil, err
    }
    pods, err := clientSet.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    return pods.Items, nil
}

// AllPodsUp checks if pods on the cluster are Running or Succeeded, otherwise returns an error
func AllPodsUp(kubeconfigFile string) error {
    clientSet, err := K8sClient(kubeconfigFile)
    if err != nil {
        return err
    }
    pods, err := clientSet.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
    if err != nil {
        return err
    }
    for _, pod := range pods.Items {
        // Check if the pod is running
        if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded {
            return fmt.Errorf("pod %s is %s", pod.Name, pod.Status.Phase)
        }
    }
    return nil
}

// PodReady checks if a pod is ready by querying its status
func PodReady(podName, namespace, kubeconfigFile string) (bool, error) {
    clientSet, err := K8sClient(kubeconfigFile)
    if err != nil {
        return false, err
    }
    pod, err := clientSet.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
    if err != nil {
        return false, fmt.Errorf("failed to get pod: %v", err)
    }
    // Check if the pod is running
    for _, containerStatus := range pod.Status.ContainerStatuses {
        if containerStatus.Name == podName && containerStatus.Ready {
            return true, nil
        }
    }
    return false, nil
}

// Checks if provided nodes are ready, otherwise returns an error
func NodesReady(kubeconfigFile string, nodeNames []string) error {
    nodes, err := ParseNodes(kubeconfigFile)
    if err != nil {
        return err
    }
    nodesToCheck := set.New(nodeNames...)
    readyNodes := make(set.Set[string], 0)
    for _, node := range nodes {
        for _, condition := range node.Status.Conditions {
            if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue {
                return fmt.Errorf("node %s is not ready", node.Name)
            }
            readyNodes.Insert(node.Name)
        }
    }
    // Check if all nodes are ready
    if !nodesToCheck.Equal(readyNodes) {
        return fmt.Errorf("expected nodes %v, found %v", nodesToCheck, readyNodes)
    }
    return nil
}

func K8sClient(kubeconfigFile string) (*kubernetes.Clientset, error) {
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
    if err != nil {
        return nil, err
    }
    clientSet, err := kubernetes.NewForConfig(config)
    if err != nil {
        return nil, err
    }
    return clientSet, nil
}
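The test suites changed below poll these helpers with Gomega's Eventually. A small sketch of a standalone suite doing the same, assuming a cluster is already running; the suite name and kubeconfig path are placeholders:

package example_test

import (
	"testing"

	"github.com/k3s-io/k3s/tests"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestClusterReady(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cluster Ready Suite")
}

var _ = Describe("Cluster readiness", func() {
	It("waits for the default K3s deployments and pods", func() {
		kubeconfig := "/tmp/kubeconfig.yaml" // placeholder path
		// Poll until coredns, local-path-provisioner, metrics-server,
		// and traefik report all replicas ready.
		Eventually(func() error {
			return tests.CheckDefaultDeployments(kubeconfig)
		}, "60s", "5s").Should(Succeed())
		// Then require every pod to be Running or Succeeded.
		Eventually(func() error {
			return tests.AllPodsUp(kubeconfig)
		}, "120s", "5s").Should(Succeed())
	})
})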
@@ -7,6 +7,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -31,10 +32,10 @@ var _ = Describe("Basic Tests", Ordered, func() {
Expect(config.ProvisionServers(1)).To(Succeed())
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())
return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
}, "40s", "5s").Should(Succeed())
})
})

@@ -46,7 +47,7 @@ var _ = Describe("Basic Tests", Ordered, func() {
})
It("should validate local storage volume", func() {
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
})
@@ -5,6 +5,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -28,7 +29,7 @@ var _ = Describe("Boostrap Token Tests", Ordered, func() {
Expect(err).NotTo(HaveOccurred())
Expect(config.ProvisionServers(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
})

@@ -46,10 +47,10 @@ var _ = Describe("Boostrap Token Tests", Ordered, func() {
config.Token = newSecret
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func(g Gomega) {
nodes, err := tester.ParseNodes(config.KubeconfigFile)
nodes, err := tests.ParseNodes(config.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(nodes).To(HaveLen(2))
g.Expect(tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())).To(Succeed())
g.Expect(tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())).To(Succeed())
}, "40s", "5s").Should(Succeed())
})
})
@@ -8,6 +8,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -62,7 +63,7 @@ var _ = Describe("CA Certs Tests", Ordered, func() {
Expect(config.ProvisionServers(1)).To(Succeed())
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
})
@@ -5,6 +5,7 @@ import (
"os"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -30,10 +31,10 @@ var _ = Describe("Etcd Tests", Ordered, func() {
It("should provision servers", func() {
Expect(config.ProvisionServers(3)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())
return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
}, "60s", "5s").Should(Succeed())
})
It("should destroy the cluster", func() {

@@ -56,10 +57,10 @@ var _ = Describe("Etcd Tests", Ordered, func() {
Expect(config.ProvisionServers(5)).To(Succeed())
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "90s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())
return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
}, "90s", "5s").Should(Succeed())
})
})
@@ -6,6 +6,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -30,10 +31,10 @@ var _ = Describe("LazyPull Tests", Ordered, func() {
config.ServerYaml = "snapshotter: stargz"
Expect(config.ProvisionServers(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())
return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
}, "40s", "5s").Should(Succeed())
})
})

@@ -45,7 +46,7 @@ var _ = Describe("LazyPull Tests", Ordered, func() {
})
It("should have the pod come up", func() {
Eventually(func() (bool, error) {
return tester.PodReady("stargz-snapshot-test", "default", config.KubeconfigFile)
return tests.PodReady("stargz-snapshot-test", "default", config.KubeconfigFile)
}, "30s", "5s").Should(BeTrue())
})
var topLayer string
@@ -7,6 +7,7 @@ import (
"testing"

"github.com/blang/semver/v4"
"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -62,7 +63,7 @@ var _ = Describe("Skew Tests", Ordered, func() {
config.K3sImage = "rancher/k3s:" + lastMinorVersion
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should match respective versions", func() {

@@ -85,7 +86,7 @@ var _ = Describe("Skew Tests", Ordered, func() {
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")

Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("should destroy the cluster", func() {

@@ -103,11 +104,11 @@ var _ = Describe("Skew Tests", Ordered, func() {
config.K3sImage = *k3sImage
Expect(config.ProvisionServers(3)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3))
g.Expect(tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())).To(Succeed())
g.Expect(tests.ParseNodes(config.KubeconfigFile)).To(HaveLen(3))
g.Expect(tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())).To(Succeed())
}, "60s", "5s").Should(Succeed())
})
It("should match respective versions", func() {
@@ -6,6 +6,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -35,10 +36,10 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
Expect(config.ProvisionServers(*serverCount)).To(Succeed())
Expect(config.ProvisionAgents(*agentCount)).To(Succeed())
Eventually(func() error {
return tester.CheckDefaultDeployments(config.KubeconfigFile)
return tests.CheckDefaultDeployments(config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())
return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
}, "40s", "5s").Should(Succeed())
})
})

@@ -135,12 +136,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
It("Checks that all nodes and pods are ready", func() {
By("Fetching node status")
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile, config.GetNodeNames())
return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
}, "60s", "5s").Should(Succeed())

By("Fetching Pods status")
Eventually(func(g Gomega) {
pods, err := tester.ParsePods(config.KubeconfigFile)
pods, err := tests.ParsePods(config.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
@@ -1,7 +1,6 @@
package docker

import (
"context"
"fmt"
"math/rand"
"net"

@@ -15,11 +14,6 @@ import (
"golang.org/x/mod/semver"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/utils/set"
)

type TestConfig struct {

@@ -491,121 +485,3 @@ func (config TestConfig) DeployWorkload(workload string) (string, error) {
}
return "", nil
}

// TODO the below functions are duplicated in the integration test utils. Consider combining into commmon package

// CheckDefaultDeployments checks if the default deployments: coredns, local-path-provisioner, metrics-server, traefik
// for K3s are ready, otherwise returns an error
func CheckDefaultDeployments(kubeconfigFile string) error {
return DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, kubeconfigFile)
}

// DeploymentsReady checks if the provided list of deployments are ready, otherwise returns an error
func DeploymentsReady(deployments []string, kubeconfigFile string) error {

deploymentSet := make(map[string]bool)
for _, d := range deployments {
deploymentSet[d] = false
}

client, err := k8sClient(kubeconfigFile)
if err != nil {
return err
}
deploymentList, err := client.AppsV1().Deployments("").List(context.Background(), metav1.ListOptions{})
if err != nil {
return err
}
for _, deployment := range deploymentList.Items {
if _, ok := deploymentSet[deployment.Name]; ok && deployment.Status.ReadyReplicas == deployment.Status.Replicas {
deploymentSet[deployment.Name] = true
}
}
for d, found := range deploymentSet {
if !found {
return fmt.Errorf("failed to deploy %s", d)
}
}

return nil
}

func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) {
clientSet, err := k8sClient(kubeconfigFile)
if err != nil {
return nil, err
}
nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}

return nodes.Items, nil
}

func ParsePods(kubeconfigFile string) ([]corev1.Pod, error) {
clientSet, err := k8sClient(kubeconfigFile)
if err != nil {
return nil, err
}
pods, err := clientSet.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}

return pods.Items, nil
}

// PodReady checks if a pod is ready by querying its status
func PodReady(podName, namespace, kubeconfigFile string) (bool, error) {
clientSet, err := k8sClient(kubeconfigFile)
if err != nil {
return false, err
}
pod, err := clientSet.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod: %v", err)
}
// Check if the pod is running
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Name == podName && containerStatus.Ready {
return true, nil
}
}
return false, nil
}

// Checks if provided nodes are ready, otherwise returns an error
func NodesReady(kubeconfigFile string, nodeNames []string) error {
nodes, err := ParseNodes(kubeconfigFile)
if err != nil {
return err
}
nodesToCheck := set.New(nodeNames...)
readyNodes := make(set.Set[string], 0)
for _, node := range nodes {
for _, condition := range node.Status.Conditions {
if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue {
return fmt.Errorf("node %s is not ready", node.Name)
}
readyNodes.Insert(node.Name)
}
}
// Check if all nodes are ready
if !nodesToCheck.Equal(readyNodes) {
return fmt.Errorf("expected nodes %v, found %v", nodesToCheck, readyNodes)
}
return nil
}

func k8sClient(kubeconfigFile string) (*kubernetes.Clientset, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
if err != nil {
return nil, err
}
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientSet, nil
}
@@ -9,6 +9,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -69,7 +70,7 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should confirm latest version", func() {

@@ -84,7 +85,7 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")

Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("should upgrade to current commit build", func() {

@@ -111,7 +112,7 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
Expect(config.ProvisionAgents(numAgents)).To(Succeed())

Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should confirm commit version", func() {

@@ -131,7 +132,7 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
})
It("should confirm test pod is still Running", func() {
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
@@ -2,11 +2,10 @@ package autoimport
import (
"flag"
"fmt"
"os"
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -32,11 +31,7 @@ func Test_E2EAutoImport(t *testing.T) {
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
}

var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var tc *e2e.TestConfig

var _ = ReportAfterEach(e2e.GenReport)

@@ -45,133 +40,121 @@ var _ = Describe("Verify Create", Ordered, func() {
It("Starts up with no issues", func() {
var err error
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
By("CLUSTER CONFIG")
By("OS: " + *nodeOS)
By(tc.Status())
})

It("Checks Node and Pod Status", func() {
fmt.Printf("\nFetching node status\n")
It("Checks node and pod status", func() {
By("Fetching Nodes status")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
e2e.DumpPods(tc.KubeConfigFile)

fmt.Printf("\nFetching Pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
Eventually(func() error {
return tests.AllPodsUp(tc.KubeConfigFile)
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
e2e.DumpPods(tc.KubeConfigFile)
})

It("Create a folder in agent/images", func() {
cmd := `mkdir /var/lib/rancher/k3s/agent/images`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
})

It("Create file for auto import and search in the image store", func() {
cmd := `echo docker.io/library/redis:latest | sudo tee /var/lib/rancher/k3s/agent/images/testautoimport.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/redis`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

It("Change name for the file and see if the label is still pinned", func() {
cmd := `mv /var/lib/rancher/k3s/agent/images/testautoimport.txt /var/lib/rancher/k3s/agent/images/testautoimportrename.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/redis`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

It("Create, remove and create again a file", func() {
cmd := `echo docker.io/library/busybox:latest | sudo tee /var/lib/rancher/k3s/agent/images/bb.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())

cmd = `rm /var/lib/rancher/k3s/agent/images/bb.txt`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err = tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())

cmd = `echo docker.io/library/busybox:latest | sudo tee /var/lib/rancher/k3s/agent/images/bb.txt`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err = tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

It("Move the folder, add a image and then see if the image is going to be pinned", func() {
cmd := `mv /var/lib/rancher/k3s/agent/images /var/lib/rancher/k3s/agent/test`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

cmd = `echo 'docker.io/library/mysql:latest' | sudo tee /var/lib/rancher/k3s/agent/test/mysql.txt`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err = tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

cmd = `mv /var/lib/rancher/k3s/agent/test /var/lib/rancher/k3s/agent/images`
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err = tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/mysql`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

It("Restarts normally", func() {
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
errRestart := e2e.RestartCluster(append(tc.Servers, tc.Agents...))
Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))

@@ -182,29 +165,29 @@ var _ = Describe("Verify Create", Ordered, func() {
It("Verify bb.txt image and see if are pinned", func() {
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

It("Removes bb.txt file", func() {
cmd := `rm /var/lib/rancher/k3s/agent/images/bb.txt`
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
_, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)

Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

It("Restarts normally", func() {
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
errRestart := e2e.RestartCluster(append(tc.Servers, tc.Agents...))
Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))

@@ -215,8 +198,8 @@ var _ = Describe("Verify Create", Ordered, func() {
It("Verify if bb.txt image is unpinned", func() {
Eventually(func(g Gomega) {
cmd := `k3s ctr images list | grep library/busybox`
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).ShouldNot(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).ShouldNot(ContainSubstring("io.cri-containerd.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).ShouldNot(ContainSubstring("io.cattle.k3s.pinned=pinned"))
g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).ShouldNot(ContainSubstring("io.cri-containerd.pinned=pinned"))
}, "620s", "5s").Should(Succeed())
})

@@ -231,10 +214,10 @@ var _ = AfterEach(func() {
var _ = AfterSuite(func() {

if !failed {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
}
if !failed || *ci {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
}
})
@@ -2,11 +2,10 @@ package rotateca
import (
"flag"
"fmt"
"os"
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -25,10 +24,7 @@ func Test_E2EBtrfsSnapshot(t *testing.T) {
RunSpecs(t, "Btrfs Snapshot Test Suite", suiteConfig, reporterConfig)
}

var (
kubeConfigFile string
serverNodeNames []string
)
var tc *e2e.TestConfig

var _ = ReportAfterEach(e2e.GenReport)

@@ -38,45 +34,34 @@ var _ = Describe("Verify that btrfs based servers work", Ordered, func() {
var err error
// OS and server are hardcoded because only openSUSE Leap 15.5 natively supports Btrfs
if *local {
serverNodeNames, _, err = e2e.CreateLocalCluster("opensuse/Leap-15.6.x86_64", 1, 0)
tc, err = e2e.CreateLocalCluster("opensuse/Leap-15.6.x86_64", 1, 0)
} else {
serverNodeNames, _, err = e2e.CreateCluster("opensuse/Leap-15.6.x86_64", 1, 0)
tc, err = e2e.CreateCluster("opensuse/Leap-15.6.x86_64", 1, 0)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("Server Nodes:", serverNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
By("CLUSTER CONFIG")
By(tc.Status())
})

It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
By("Fetching Nodes status")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
e2e.DumpPods(tc.KubeConfigFile)

fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
Eventually(func() error {
return tests.AllPodsUp(tc.KubeConfigFile)
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
e2e.DumpPods(tc.KubeConfigFile)
})
It("Checks that btrfs snapshots exist", func() {
cmd := "btrfs subvolume list /var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.btrfs"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
res, err := tc.Servers[0].RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred())
Expect(res).To(MatchRegexp("agent/containerd/io.containerd.snapshotter.v1.btrfs/active/\\d+"))
Expect(res).To(MatchRegexp("agent/containerd/io.containerd.snapshotter.v1.btrfs/snapshots/\\d+"))

@@ -91,10 +76,10 @@ var _ = AfterEach(func() {

var _ = AfterSuite(func() {
if failed {
Expect(e2e.SaveJournalLogs(serverNodeNames)).To(Succeed())
Expect(e2e.SaveJournalLogs(tc.Servers)).To(Succeed())
}
if !failed || *ci {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
}
})
@@ -7,6 +7,7 @@ import (
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

@@ -27,165 +28,151 @@ func Test_E2EDualStack(t *testing.T) {
RunSpecs(t, "DualStack Test Suite", suiteConfig, reporterConfig)
}

var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var tc *e2e.TestConfig

var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify DualStack Configuration", Ordered, func() {

It("Starts up with no issues", func() {
var err error
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})

It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
Context("Cluster Deploys with both IPv6 and IPv4 networks", func() {
It("Starts up with no issues", func() {
var err error
if *local {
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
}, "620s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
tc.Hardened = *hardened
By("CLUSTER CONFIG")
By("OS: " + *nodeOS)
By(tc.Status())
})

It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}
}, "620s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})

It("Verifies that each node has IPv4 and IPv6", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
}
})
It("Verifies that each pod has IPv4 and IPv6", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
}
})

It("Verifies ClusterIP Service", func() {
_, err := e2e.DeployWorkload("dualstack_clusterip.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod"))

// Checks both IPv4 and IPv6
clusterips, err := e2e.FetchClusterIP(kubeConfigFile, "ds-clusterip-svc", true)
Expect(err).NotTo(HaveOccurred())
for _, ip := range strings.Split(clusterips, ",") {
if strings.Contains(ip, "::") {
ip = "[" + ip + "]"
}
pods, err := e2e.ParsePods(kubeConfigFile, false)
}, "620s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(tc.KubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if !strings.HasPrefix(pod.Name, "ds-clusterip-pod") {
continue
}
cmd := fmt.Sprintf("curl -L --insecure http://%s", ip)
Eventually(func() (string, error) {
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
}, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd)
}
}
})
It("Verifies Ingress", func() {
_, err := e2e.DeployWorkload("dualstack_ingress.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
cmd := "kubectl get ingress ds-ingress --kubeconfig=" + kubeConfigFile + " -o jsonpath=\"{.spec.rules[*].host}\""
hostName, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
for _, node := range nodeIPs {
cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.IPv4)
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.IPv6)
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
}
})
})

It("Verifies NodePort Service", func() {
_, err := e2e.DeployWorkload("dualstack_nodeport.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
cmd = "curl -L --insecure http://" + node.IPv4 + ":" + nodeport + "/name.html"
It("Checks pod status", func() {
Eventually(func() error {
return tests.AllPodsUp(tc.KubeConfigFile)
}, "620s", "5s").Should(Succeed())
e2e.DumpPods(tc.KubeConfigFile)
})

It("Verifies that each node has IPv4 and IPv6", func() {
nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
}
})
It("Verifies that each pod has IPv4 and IPv6", func() {
podIPs, err := e2e.GetPodIPs(tc.KubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
}
})

It("Verifies ClusterIP Service", func() {
_, err := tc.DeployWorkload("dualstack_clusterip.yaml")
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod"))

// Checks both IPv4 and IPv6
clusterips, err := e2e.FetchClusterIP(tc.KubeConfigFile, "ds-clusterip-svc", true)
Expect(err).NotTo(HaveOccurred())
for _, ip := range strings.Split(clusterips, ",") {
if strings.Contains(ip, "::") {
ip = "[" + ip + "]"
}
pods, err := tests.ParsePods(tc.KubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if !strings.HasPrefix(pod.Name, "ds-clusterip-pod") {
continue
}
cmd := fmt.Sprintf("curl -L --insecure http://%s", ip)
Eventually(func() (string, error) {
return tc.Servers[0].RunCmdOnNode(cmd)
}, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd)
}
}
})
It("Verifies Ingress", func() {
_, err := tc.DeployWorkload("dualstack_ingress.yaml")
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
cmd := "kubectl get ingress ds-ingress -o jsonpath=\"{.spec.rules[*].host}\""
hostName, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
for _, node := range nodeIPs {
cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.IPv4)
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.IPv6)
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
}
})

It("Verifies NodePort Service", func() {
_, err := tc.DeployWorkload("dualstack_nodeport.yaml")
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl get service ds-nodeport-svc --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
cmd = "curl -L --insecure http://" + node.IPv4 + ":" + nodeport + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
cmd = "curl -L --insecure http://[" + node.IPv6 + "]:" + nodeport + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
}
})
It("Verifies podSelector Network Policy", func() {
_, err := tc.DeployWorkload("pod_client.yaml")
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl exec svc/client-curl -- curl -m7 ds-clusterip-svc/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
cmd = "curl -L --insecure http://[" + node.IPv6 + "]:" + nodeport + "/name.html"
}, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
_, err = tc.DeployWorkload("netpol-fail.yaml")
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl exec svc/client-curl -- curl -m7 ds-clusterip-svc/name.html"
Eventually(func() error {
_, err = e2e.RunCommand(cmd)
Expect(err).To(HaveOccurred())
return err
}, "20s", "3s")
_, err = tc.DeployWorkload("netpol-work.yaml")
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl exec svc/client-curl -- curl -m7 ds-clusterip-svc/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
}
})
It("Verifies podSelector Network Policy", func() {
_, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 ds-clusterip-svc/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
_, err = e2e.DeployWorkload("netpol-fail.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 ds-clusterip-svc/name.html"
Eventually(func() error {
_, err = e2e.RunCommand(cmd)
Expect(err).To(HaveOccurred())
return err
}, "20s", "3s")
_, err = e2e.DeployWorkload("netpol-work.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 ds-clusterip-svc/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
}, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
})
})
})

@@ -196,12 +183,12 @@ var _ = AfterEach(func() {

var _ = AfterSuite(func() {
if failed {
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
}
if !failed || *ci {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
}
})
@ -4,9 +4,9 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -32,11 +32,7 @@ func Test_E2EPrivateRegistry(t *testing.T) {
|
|||
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@@ -45,42 +41,31 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
|
||||
})
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
By("Fetching pod status")
|
||||
Eventually(func() error {
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "10s").Should(Succeed())
|
||||
})
|
||||
It("Should create and validate deployment with embedded registry mirror using image tag", func() {
|
||||
res, err := e2e.RunCommand("kubectl create deployment my-webpage-1 --image=docker.io/library/nginx:1.25.3")
|
||||
|
@@ -128,7 +113,7 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Should expose embedded registry metrics", func() {
|
||||
grepCmd := fmt.Sprintf("kubectl get --raw /api/v1/nodes/%s/proxy/metrics | grep -F 'spegel_advertised_images{registry=\"docker.io\"}'", serverNodeNames[0])
|
||||
grepCmd := fmt.Sprintf("kubectl get --raw /api/v1/nodes/%s/proxy/metrics | grep -F 'spegel_advertised_images{registry=\"docker.io\"}'", tc.Servers[0])
|
||||
res, err := e2e.RunCommand(grepCmd)
|
||||
fmt.Println(res)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
@@ -147,12 +132,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -7,11 +7,11 @@ package externalip
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@@ -21,7 +21,6 @@ import (
|
|||
var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system")
|
||||
var serverCount = flag.Int("serverCount", 1, "number of server nodes")
|
||||
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
|
||||
var hardened = flag.Bool("hardened", false, "true or false")
|
||||
var ci = flag.Bool("ci", false, "running on CI")
|
||||
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
|
||||
|
||||
|
@@ -55,107 +54,93 @@ func Test_E2EExternalIP(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
var _ = Describe("Verify External-IP config", Ordered, func() {
|
||||
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks Node Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
Context("Cluster comes up with External-IP configuration", func() {
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParseNodes(kubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
})
|
||||
|
||||
It("Checks Pod Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
It("Checks Node Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks pod status", func() {
|
||||
By("Fetching pod status")
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "10s").Should(Succeed())
|
||||
})
|
||||
})
|
||||
Context("Deploy workloads to check cluster connectivity of the nodes", func() {
|
||||
It("Verifies that each node has vagrant IP", func() {
|
||||
nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodeIPs {
|
||||
Expect(node.IPv4).Should(ContainSubstring("10.10."))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParsePods(kubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
It("Verifies that each pod has vagrant IP or clusterCIDR IP", func() {
|
||||
podIPs, err := e2e.GetPodIPs(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range podIPs {
|
||||
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10."), ContainSubstring("10.42.")), pod.Name)
|
||||
}
|
||||
})
|
||||
It("Verifies that flannel added the correct annotation for the external-ip", func() {
|
||||
nodeIPs, err := getExternalIPs(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, annotation := range nodeIPs {
|
||||
Expect(annotation).Should(ContainSubstring("10.100.100."))
|
||||
}
|
||||
})
|
||||
It("Verifies internode connectivity over the tunnel", func() {
|
||||
_, err := tc.DeployWorkload("pod_client.yaml")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
It("Verifies that each node has vagrant IP", func() {
|
||||
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodeIPs {
|
||||
Expect(node.IPv4).Should(ContainSubstring("10.10."))
|
||||
}
|
||||
})
|
||||
It("Verifies that each pod has vagrant IP or clusterCIDR IP", func() {
|
||||
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range podIPs {
|
||||
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10."), ContainSubstring("10.42.")), pod.Name)
|
||||
}
|
||||
})
|
||||
It("Verifies that flannel added the correct annotation for the external-ip", func() {
|
||||
nodeIPs, err := getExternalIPs(kubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, annotation := range nodeIPs {
|
||||
Expect(annotation).Should(ContainSubstring("10.100.100."))
|
||||
}
|
||||
})
|
||||
It("Verifies internode connectivity over the tunnel", func() {
|
||||
_, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, *hardened)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Wait for the pod_client to have an IP
|
||||
Eventually(func() string {
|
||||
ips, _ := getClientIPs(tc.KubeConfigFile)
|
||||
return ips[0].IPv4
|
||||
}, "40s", "5s").Should(ContainSubstring("10.42"), "failed getClientIPs")
|
||||
|
||||
// Wait for the pod_client to have an IP
|
||||
Eventually(func() string {
|
||||
ips, _ := getClientIPs(kubeConfigFile)
|
||||
return ips[0].IPv4
|
||||
}, "40s", "5s").Should(ContainSubstring("10.42"), "failed getClientIPs")
|
||||
|
||||
clientIPs, err := getClientIPs(kubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, ip := range clientIPs {
|
||||
cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 " + ip.IPv4 + "/name.html"
|
||||
clientIPs, err := getClientIPs(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, ip := range clientIPs {
|
||||
cmd := "kubectl exec svc/client-curl -- curl -m7 " + ip.IPv4 + "/name.html"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "20s", "3s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd)
|
||||
}
|
||||
})
|
||||
It("Verifies loadBalancer service's IP is the node-external-ip", func() {
|
||||
_, err := tc.DeployWorkload("loadbalancer.yaml")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
cmd := "kubectl get svc -l k8s-app=nginx-app-loadbalancer -o=jsonpath='{range .items[*]}{.metadata.name}{.status.loadBalancer.ingress[*].ip}{end}'"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "20s", "3s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd)
|
||||
}
|
||||
})
|
||||
It("Verifies loadBalancer service's IP is the node-external-ip", func() {
|
||||
_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " get svc -l k8s-app=nginx-app-loadbalancer -o=jsonpath='{range .items[*]}{.metadata.name}{.status.loadBalancer.ingress[*].ip}{end}'"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "20s", "3s").Should(ContainSubstring("10.100.100"), "failed cmd: "+cmd)
|
||||
}, "20s", "3s").Should(ContainSubstring("10.100.100"), "failed cmd: "+cmd)
|
||||
})
|
||||
})
|
||||
})
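The connectivity checks above assert IP ranges with string prefixes ("10.10.", "10.42.", "10.100.100."). Purely as an illustration, and not part of this change, the same intent can be stated as CIDR membership; the ranges below are the ones those prefixes imply (vagrant network and the default cluster CIDR), and the helper name is hypothetical. It only needs the standard library ("net").

// inCIDR reports whether ip falls inside cidr; it mirrors the prefix checks above.
func inCIDR(ip, cidr string) bool {
	_, network, err := net.ParseCIDR(cidr) // e.g. "10.42.0.0/16" for the assumed default cluster CIDR
	if err != nil {
		return false
	}
	parsed := net.ParseIP(ip)
	return parsed != nil && network.Contains(parsed)
}

// Usage, equivalent to the substring assertion on pod IPs:
//   Expect(inCIDR(pod.IPv4, "10.10.0.0/16") || inCIDR(pod.IPv4, "10.42.0.0/16")).To(BeTrue(), pod.Name)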
|
||||
|
||||
|
@@ -166,12 +151,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -7,9 +7,11 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// Valid nodeOS:
|
||||
|
@@ -32,11 +34,7 @@ func Test_E2EPrivateRegistry(t *testing.T) {
|
|||
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@@ -45,52 +43,41 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
|
||||
})
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
By("Fetching pod status")
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "10s").Should(Succeed())
|
||||
})
|
||||
|
||||
It("Create new private registry", func() {
|
||||
registry, err := e2e.RunCmdOnNode("docker run --init -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
|
||||
registry, err := tc.Servers[0].RunCmdOnNode("docker run --init -d -p 5000:5000 --restart=always --name registry registry:2 ")
|
||||
fmt.Println(registry)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
})
|
||||
It("ensures registry is working", func() {
|
||||
a, err := e2e.RunCmdOnNode("docker ps -a | grep registry\n", serverNodeNames[0])
|
||||
a, err := tc.Servers[0].RunCmdOnNode("docker ps -a | grep registry\n")
|
||||
fmt.Println(a)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
|
@@ -100,44 +87,44 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
// NODEIP:5000 as a mirror.
|
||||
It("Should pull and image from dockerhub and send it to private registry", func() {
|
||||
cmd := "docker pull docker.io/library/nginx:1.27.3"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
|
||||
nodeIP, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
|
||||
nodeIP, err := tc.Servers[0].FetchNodeExternalIP()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cmd = "docker tag docker.io/library/nginx:1.27.3 " + nodeIP + ":5000/docker-io-library/nginx:1.27.3"
|
||||
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
|
||||
cmd = "docker push " + nodeIP + ":5000/docker-io-library/nginx:1.27.3"
|
||||
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
|
||||
cmd = "docker image remove docker.io/library/nginx:1.27.3 " + nodeIP + ":5000/docker-io-library/nginx:1.27.3"
|
||||
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
|
||||
})
|
||||
|
||||
It("Should create and validate deployment with private registry on", func() {
|
||||
res, err := e2e.RunCmdOnNode("kubectl create deployment my-webpage --image=my-registry.local/library/nginx:1.27.3", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("kubectl create deployment my-webpage --image=my-registry.local/library/nginx:1.27.3")
|
||||
fmt.Println(res)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
var pod e2e.Pod
|
||||
var pod corev1.Pod
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
pods, err := tests.ParsePods(tc.KubeConfigFile)
|
||||
for _, p := range pods {
|
||||
if strings.Contains(p.Name, "my-webpage") {
|
||||
pod = p
|
||||
}
|
||||
}
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pod.Status).Should(Equal("Running"))
|
||||
g.Expect(string(pod.Status.Phase)).Should(Equal("Running"))
|
||||
}, "60s", "5s").Should(Succeed())
|
||||
|
||||
cmd := "curl " + pod.IP
|
||||
Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).To(ContainSubstring("Welcome to nginx!"))
|
||||
cmd := "curl " + pod.Status.PodIP
|
||||
Expect(tc.Servers[0].RunCmdOnNode(cmd)).To(ContainSubstring("Welcome to nginx!"))
|
||||
})
|
||||
|
||||
})
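The hunk above relies on the two helper changes this PR introduces: commands run as methods on the node (tc.Servers[0].RunCmdOnNode, FetchNodeExternalIP), and pods come back as plain corev1.Pod values from tests.ParsePods, so status and IP are read from pod.Status.Phase and pod.Status.PodIP. A hedged sketch combining both, with seedRegistry and findRunningPod as hypothetical helper names and the package name a placeholder:

package example

import (
	"fmt"
	"strings"

	"github.com/k3s-io/k3s/tests"
	"github.com/k3s-io/k3s/tests/e2e"
	corev1 "k8s.io/api/core/v1"
)

// seedRegistry pulls an image, retags it against the node's local registry and pushes it,
// using the VagrantNode methods shown in the diff.
func seedRegistry(server e2e.VagrantNode, image, mirrored string) error {
	nodeIP, err := server.FetchNodeExternalIP()
	if err != nil {
		return err
	}
	target := nodeIP + ":5000/" + mirrored
	for _, cmd := range []string{
		"docker pull " + image,
		"docker tag " + image + " " + target,
		"docker push " + target,
	} {
		if _, err := server.RunCmdOnNode(cmd); err != nil {
			return fmt.Errorf("failed %q: %w", cmd, err)
		}
	}
	return nil
}

// findRunningPod returns the first Running pod whose name contains namePrefix;
// callers can then curl pod.Status.PodIP as the test above does.
func findRunningPod(kubeconfig, namePrefix string) (corev1.Pod, error) {
	pods, err := tests.ParsePods(kubeconfig)
	if err != nil {
		return corev1.Pod{}, err
	}
	for _, p := range pods {
		if strings.Contains(p.Name, namePrefix) && p.Status.Phase == corev1.PodRunning {
			return p, nil
		}
	}
	return corev1.Pod{}, fmt.Errorf("no running pod matching %q", namePrefix)
}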
|
||||
|
@@ -150,17 +137,17 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
r1, err := e2e.RunCmdOnNode("docker rm -f registry", serverNodeNames[0])
|
||||
r1, err := tc.Servers[0].RunCmdOnNode("docker rm -f registry")
|
||||
Expect(err).NotTo(HaveOccurred(), r1)
|
||||
r2, err := e2e.RunCmdOnNode("kubectl delete deployment my-webpage", serverNodeNames[0])
|
||||
r2, err := tc.Servers[0].RunCmdOnNode("kubectl delete deployment my-webpage")
|
||||
Expect(err).NotTo(HaveOccurred(), r2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -4,9 +4,9 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@@ -29,43 +29,40 @@ func Test_E2ERootlessStartupValidation(t *testing.T) {
|
|||
RunSpecs(t, "Startup Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
func StartK3sCluster(nodes []string, serverYAML string) error {
|
||||
func StartK3sCluster(nodes []e2e.VagrantNode, serverYAML string) error {
|
||||
for _, node := range nodes {
|
||||
|
||||
resetCmd := "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
|
||||
yamlCmd := fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML)
|
||||
startCmd := "systemctl --user restart k3s-rootless"
|
||||
|
||||
if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil {
|
||||
if _, err := node.RunCmdOnNode(resetCmd); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode(yamlCmd, node); err != nil {
|
||||
if _, err := node.RunCmdOnNode(yamlCmd); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := RunCmdOnRootlesNode("systemctl --user daemon-reload", node); err != nil {
|
||||
if _, err := RunCmdOnRootlessNode("systemctl --user daemon-reload", node.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := RunCmdOnRootlesNode(startCmd, node); err != nil {
|
||||
if _, err := RunCmdOnRootlessNode(startCmd, node.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func KillK3sCluster(nodes []string) error {
|
||||
func KillK3sCluster(nodes []e2e.VagrantNode) error {
|
||||
for _, node := range nodes {
|
||||
if _, err := RunCmdOnRootlesNode(`systemctl --user stop k3s-rootless`, node); err != nil {
|
||||
if _, err := RunCmdOnRootlessNode(`systemctl --user stop k3s-rootless`, node.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := RunCmdOnRootlesNode("k3s-killall.sh", node); err != nil {
|
||||
if _, err := RunCmdOnRootlessNode("k3s-killall.sh", node.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := RunCmdOnRootlesNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node); err != nil {
|
||||
if _, err := RunCmdOnRootlessNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@@ -77,13 +74,13 @@ var _ = ReportAfterEach(e2e.GenReport)
|
|||
var _ = BeforeSuite(func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
|
||||
} else {
|
||||
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, 1, 0)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, 1, 0)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
//Checks if system is using cgroup v2
|
||||
_, err = e2e.RunCmdOnNode("cat /sys/fs/cgroup/cgroup.controllers", serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode("cat /sys/fs/cgroup/cgroup.controllers")
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
})
|
||||
|
@@ -91,40 +88,32 @@ var _ = BeforeSuite(func() {
|
|||
var _ = Describe("Various Startup Configurations", Ordered, func() {
|
||||
Context("Verify standard startup :", func() {
|
||||
It("Starts K3s with no issues", func() {
|
||||
err := StartK3sCluster(serverNodeNames, "")
|
||||
err := StartK3sCluster(tc.Servers, "")
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
kubeConfigFile, err = GenRootlessKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
kubeConfigFile, err := GenRootlessKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
tc.KubeConfigFile = kubeConfigFile
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, false)
|
||||
_, _ = e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, false)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Returns pod metrics", func() {
|
||||
|
@@ -154,7 +143,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(serverNodeNames)
|
||||
err := KillK3sCluster(tc.Servers)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
@@ -168,12 +157,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, serverNodeNames))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, tc.Servers))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(tc.Servers)).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -8,8 +8,8 @@ import (
|
|||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
)
|
||||
|
||||
// RunCmdOnRootlesNode executes a command from within the given node as user vagrant
|
||||
func RunCmdOnRootlesNode(cmd string, nodename string) (string, error) {
|
||||
// RunCmdOnRootlessNode executes a command from within the given node as user vagrant
|
||||
func RunCmdOnRootlessNode(cmd string, nodename string) (string, error) {
|
||||
injectEnv := ""
|
||||
if _, ok := os.LookupEnv("E2E_GOCOVER"); ok && strings.HasPrefix(cmd, "k3s") {
|
||||
injectEnv = "GOCOVERDIR=/tmp/k3scov "
|
||||
|
@@ -23,11 +23,12 @@ func RunCmdOnRootlesNode(cmd string, nodename string) (string, error) {
|
|||
}
|
||||
|
||||
func GenRootlessKubeConfigFile(serverName string) (string, error) {
|
||||
kubeConfig, err := RunCmdOnRootlesNode("cat /home/vagrant/.kube/k3s.yaml", serverName)
|
||||
kubeConfig, err := RunCmdOnRootlessNode("cat /home/vagrant/.kube/k3s.yaml", serverName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
nodeIP, err := e2e.FetchNodeExternalIP(serverName)
|
||||
vNode := e2e.VagrantNode(serverName)
|
||||
nodeIP, err := vNode.FetchNodeExternalIP()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
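GenRootlessKubeConfigFile and RunCmdOnRootlessNode still key off the plain node name, so the rootless suite converts in both directions: node.String() when handing a VagrantNode to these helpers, and e2e.VagrantNode(name) when a method such as FetchNodeExternalIP is needed. A small sketch of that round trip, assumed to live in this same package next to the helpers above; the function name is hypothetical.

// rootlessAccess resolves the kubeconfig and external IP for a rootless server node.
func rootlessAccess(node e2e.VagrantNode) (kubeconfig string, nodeIP string, err error) {
	// The string-based helpers in this file want the bare node name.
	kubeconfig, err = GenRootlessKubeConfigFile(node.String())
	if err != nil {
		return "", "", err
	}
	// The e2e helpers want the VagrantNode value itself.
	nodeIP, err = node.FetchNodeExternalIP()
	if err != nil {
		return "", "", err
	}
	return kubeconfig, nodeIP, nil
}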
@@ -2,11 +2,10 @@ package rotateca
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@@ -29,11 +28,7 @@ func Test_E2ECustomCARotation(t *testing.T) {
|
|||
RunSpecs(t, "Custom Certificate Rotation Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
agentNodeNames []string
|
||||
serverNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@@ -42,43 +37,32 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Generates New CA Certificates", func() {
|
||||
|
@@ -88,46 +72,38 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
|
|||
"DATA_DIR=/opt/rancher/k3s /tmp/generate-custom-ca-certs.sh",
|
||||
}
|
||||
for _, cmd := range cmds {
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
|
||||
It("Rotates CA Certificates", func() {
|
||||
cmd := "k3s certificate rotate-ca --path=/opt/rancher/k3s/server"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed())
|
||||
})
|
||||
|
||||
It("Restarts K3s agents", func() {
|
||||
Expect(e2e.RestartCluster(agentNodeNames)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(tc.Agents)).To(Succeed())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -139,12 +115,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -8,6 +8,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@@ -31,11 +32,7 @@ func Test_E2ES3(t *testing.T) {
|
|||
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@@ -44,93 +41,79 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, 1, 0)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 0)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, 1, 0)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
})
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("ensures s3 mock is working", func() {
|
||||
res, err := e2e.RunCmdOnNode("docker ps -a | grep mock\n", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("docker ps -a | grep mock\n")
|
||||
fmt.Println(res)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
It("save s3 snapshot using CLI", func() {
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save "+
|
||||
"--etcd-s3-insecure=true "+
|
||||
"--etcd-s3-bucket=test-bucket "+
|
||||
"--etcd-s3-folder=test-folder "+
|
||||
"--etcd-s3-endpoint=localhost:9090 "+
|
||||
"--etcd-s3-skip-ssl-verify=true "+
|
||||
"--etcd-s3-access-key=test ",
|
||||
serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save " +
|
||||
"--etcd-s3-insecure=true " +
|
||||
"--etcd-s3-bucket=test-bucket " +
|
||||
"--etcd-s3-folder=test-folder " +
|
||||
"--etcd-s3-endpoint=localhost:9090 " +
|
||||
"--etcd-s3-skip-ssl-verify=true " +
|
||||
"--etcd-s3-access-key=test ")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("Snapshot on-demand-server-0"))
|
||||
})
|
||||
It("creates s3 config secret", func() {
|
||||
res, err := e2e.RunCmdOnNode("k3s kubectl create secret generic k3s-etcd-s3-config --namespace=kube-system "+
|
||||
"--from-literal=etcd-s3-insecure=true "+
|
||||
"--from-literal=etcd-s3-bucket=test-bucket "+
|
||||
"--from-literal=etcd-s3-folder=test-folder "+
|
||||
"--from-literal=etcd-s3-endpoint=localhost:9090 "+
|
||||
"--from-literal=etcd-s3-skip-ssl-verify=true "+
|
||||
"--from-literal=etcd-s3-access-key=test ",
|
||||
serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s kubectl create secret generic k3s-etcd-s3-config --namespace=kube-system " +
|
||||
"--from-literal=etcd-s3-insecure=true " +
|
||||
"--from-literal=etcd-s3-bucket=test-bucket " +
|
||||
"--from-literal=etcd-s3-folder=test-folder " +
|
||||
"--from-literal=etcd-s3-endpoint=localhost:9090 " +
|
||||
"--from-literal=etcd-s3-skip-ssl-verify=true " +
|
||||
"--from-literal=etcd-s3-access-key=test ")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("secret/k3s-etcd-s3-config created"))
|
||||
})
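The two snapshot paths above exercise the same S3 settings twice: once as flags to `k3s etcd-snapshot save` and once as --from-literal entries in the k3s-etcd-s3-config secret. The sketch below keeps them in one table to make the flag-to-literal mapping explicit; the helper names are hypothetical and only the values already used in this test appear.

// s3Settings lists the etcd S3 options used by both the CLI and the secret-based test.
var s3Settings = [][2]string{
	{"etcd-s3-insecure", "true"},
	{"etcd-s3-bucket", "test-bucket"},
	{"etcd-s3-folder", "test-folder"},
	{"etcd-s3-endpoint", "localhost:9090"},
	{"etcd-s3-skip-ssl-verify", "true"},
	{"etcd-s3-access-key", "test"},
}

// snapshotSaveCmd renders the settings as CLI flags.
func snapshotSaveCmd() string {
	cmd := "k3s etcd-snapshot save"
	for _, kv := range s3Settings {
		cmd += " --" + kv[0] + "=" + kv[1]
	}
	return cmd
}

// secretCreateCmd renders the same settings as secret literals.
func secretCreateCmd() string {
	cmd := "k3s kubectl create secret generic k3s-etcd-s3-config --namespace=kube-system"
	for _, kv := range s3Settings {
		cmd += " --from-literal=" + kv[0] + "=" + kv[1]
	}
	return cmd
}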
|
||||
It("save s3 snapshot using secret", func() {
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("Snapshot on-demand-server-0"))
|
||||
})
|
||||
It("lists saved s3 snapshot", func() {
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot list")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("file:///var/lib/rancher/k3s/server/db/snapshots/on-demand-server-0"))
|
||||
Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/on-demand-server-0"))
|
||||
})
|
||||
It("save 3 more s3 snapshots", func() {
|
||||
for _, i := range []string{"1", "2", "3"} {
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save --name special-"+i, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save --name special-" + i)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("Snapshot special-" + i + "-server-0"))
|
||||
}
|
||||
})
|
||||
It("lists saved s3 snapshot", func() {
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot list")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/on-demand-server-0"))
|
||||
Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/special-1-server-0"))
|
||||
|
@@ -138,25 +121,25 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/special-3-server-0"))
|
||||
})
|
||||
It("delete first on-demand s3 snapshot", func() {
|
||||
_, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot ls >> ./snapshotname.txt", serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("sudo k3s etcd-snapshot ls >> ./snapshotname.txt")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
snapshotName, err := e2e.RunCmdOnNode("grep -Eo 'on-demand-server-0-([0-9]+)' ./snapshotname.txt | head -1", serverNodeNames[0])
|
||||
snapshotName, err := tc.Servers[0].RunCmdOnNode("grep -Eo 'on-demand-server-0-([0-9]+)' ./snapshotname.txt | head -1")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
res, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot delete "+snapshotName, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("sudo k3s etcd-snapshot delete " + snapshotName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("Snapshot " + strings.TrimSpace(snapshotName) + " deleted"))
|
||||
})
|
||||
It("prunes s3 snapshots", func() {
|
||||
_, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
time.Sleep(time.Second)
|
||||
_, err = e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
time.Sleep(time.Second)
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot prune", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot prune")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// There should now be 4 on-demand snapshots - 2 local, and 2 on s3
|
||||
res, err = e2e.RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep on-demand | wc -l", serverNodeNames[0])
|
||||
res, err = tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep on-demand | wc -l")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(strings.TrimSpace(res)).To(Equal("4"))
|
||||
})
|
||||
|
@@ -164,7 +147,7 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
// Wait until the retention works with 3 minutes
|
||||
fmt.Printf("\nWaiting 3 minutes until retention works\n")
|
||||
time.Sleep(3 * time.Minute)
|
||||
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep etcd-snapshot | wc -l", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep etcd-snapshot | wc -l")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(strings.TrimSpace(res)).To(Equal("4"))
|
||||
})
|
||||
|
@@ -178,12 +161,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -2,11 +2,10 @@ package secretsencryption
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@@ -32,10 +31,7 @@ func Test_E2ESecretsEncryption(t *testing.T) {
|
|||
RunSpecs(t, "Secrets Encryption Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@@ -44,53 +40,44 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
|
||||
} else {
|
||||
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
tc.Hardened = *hardened
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Deploys several secrets", func() {
|
||||
_, err := e2e.DeployWorkload("secrets.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("secrets.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Secrets not deployed")
|
||||
})
|
||||
|
||||
It("Verifies encryption start stage", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
Expect(res).Should(ContainSubstring("Current Rotation Stage: start"))
|
||||
|
@@ -100,12 +87,12 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
|
||||
It("Rotates the Secrets-Encryption Keys", func() {
|
||||
cmd := "k3s secrets-encrypt rotate-keys"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
for i, nodeName := range serverNodeNames {
|
||||
for i, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), res)
|
||||
g.Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match"))
|
||||
if i == 0 {
|
||||
|
@@ -118,14 +105,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
})
|
||||
|
||||
It("Verifies reencryption_finished stage", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
g.Expect(res).Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
|
@@ -139,17 +126,17 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
Context("Disabling Secrets-Encryption", func() {
|
||||
It("Disables encryption", func() {
|
||||
cmd := "k3s secrets-encrypt disable"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt status"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode(cmd)
|
||||
}, "240s", "10s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
|
||||
for i, nodeName := range serverNodeNames {
|
||||
for i, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), res)
|
||||
if i == 0 {
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled"))
|
||||
|
@@ -161,14 +148,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed())
|
||||
})
|
||||
|
||||
It("Verifies encryption disabled on all nodes", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled"))
|
||||
g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Disabled"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
}
|
||||
})
|
||||
|
@@ -178,17 +165,17 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
Context("Enabling Secrets-Encryption", func() {
|
||||
It("Enables encryption", func() {
|
||||
cmd := "k3s secrets-encrypt enable"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt status"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode(cmd)
|
||||
}, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
|
||||
for i, nodeName := range serverNodeNames {
|
||||
for i, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), res)
|
||||
if i == 0 {
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
|
@@ -200,14 +187,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed())
|
||||
})
|
||||
|
||||
It("Verifies encryption enabled on all nodes", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
}
|
||||
})
|
||||
|
@@ -222,12 +209,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, serverNodeNames))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, tc.Servers))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(tc.Servers)).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
@@ -2,11 +2,10 @@ package secretsencryption
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@@ -29,10 +28,7 @@ func Test_E2ESecretsEncryptionOld(t *testing.T) {
|
|||
RunSpecs(t, "Secrets Encryption Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@@ -41,53 +37,43 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
|
||||
} else {
|
||||
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
tc.Hardened = *hardened
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Deploys several secrets", func() {
|
||||
_, err := e2e.DeployWorkload("secrets.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("secrets.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Secrets not deployed")
|
||||
})
|
||||
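tc.DeployWorkload drops the kubeconfig and hardened arguments of the old e2e.DeployWorkload because both now live on the TestConfig. A plausible wrapper, building on the TestConfig sketched earlier; the resource directory names here are illustrative assumptions, not taken from the repo.

package e2e

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

// DeployWorkload applies a manifest with kubectl, picking a hardened
// variant when tc.Hardened is set. Sketch only; directory layout assumed.
func (tc *TestConfig) DeployWorkload(name string) (string, error) {
	resourceDir := "../resource_files"
	if tc.Hardened {
		resourceDir = "../resource_files/hardened"
	}
	manifest, err := filepath.Abs(filepath.Join(resourceDir, name))
	if err != nil {
		return "", err
	}
	cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", manifest, tc.KubeConfigFile)
	out, err := exec.Command("bash", "-c", cmd).CombinedOutput()
	return string(out), err
}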
|
||||
It("Verifies encryption start stage", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
Expect(res).Should(ContainSubstring("Current Rotation Stage: start"))
|
||||
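The loops above now call RunCmdOnNode as a method on the node rather than the old package-level e2e.RunCmdOnNode(cmd, nodeName). A minimal sketch of how such a method could wrap vagrant ssh, assuming a string-backed VagrantNode type; the real helper in tests/e2e may differ in detail.

package e2e

import (
	"fmt"
	"os/exec"
	"strings"
)

// VagrantNode names a Vagrant VM so commands can hang off it as methods.
type VagrantNode string

func (v VagrantNode) String() string { return string(v) }

// RunCmdOnNode runs a command on the VM via vagrant ssh and returns the
// combined output. Sketch only; the real helper also wraps errors with
// node context and handles sudo quoting more carefully.
func (v VagrantNode) RunCmdOnNode(cmd string) (string, error) {
	runCmd := fmt.Sprintf(`vagrant ssh %s -c "sudo %s"`, v.String(), cmd)
	out, err := exec.Command("bash", "-c", runCmd).CombinedOutput()
	return strings.TrimSpace(string(out)), err
}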
|
@ -97,11 +83,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
|
||||
It("Prepares for Secrets-Encryption Rotation", func() {
|
||||
cmd := "k3s secrets-encrypt prepare"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
for i, nodeName := range serverNodeNames {
|
||||
for i, node := range tc.Servers {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match"))
|
||||
if i == 0 {
|
||||
|
@ -113,37 +99,29 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Verifies encryption prepare stage", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
g.Expect(res).Should(ContainSubstring("Current Rotation Stage: prepare"))
|
||||
|
@ -154,12 +132,12 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
|
||||
It("Rotates the Secrets-Encryption Keys", func() {
|
||||
cmd := "k3s secrets-encrypt rotate"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
for i, nodeName := range serverNodeNames {
|
||||
for i, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), res)
|
||||
g.Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match"))
|
||||
if i == 0 {
|
||||
|
@ -172,14 +150,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
})
|
||||
|
||||
It("Verifies encryption rotate stage", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
g.Expect(res).Should(ContainSubstring("Current Rotation Stage: rotate"))
|
||||
|
@ -190,16 +168,16 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
|
||||
It("Reencrypts the Secrets-Encryption Keys", func() {
|
||||
cmd := "k3s secrets-encrypt reencrypt"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt status"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode(cmd)
|
||||
}, "240s", "10s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
|
||||
for _, nodeName := range serverNodeNames[1:] {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
for _, node := range tc.Servers[1:] {
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match"))
|
||||
Expect(res).Should(ContainSubstring("Current Rotation Stage: rotate"))
|
||||
|
@ -207,14 +185,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s Servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil))
|
||||
})
|
||||
|
||||
It("Verifies Encryption Reencrypt Stage", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
g.Expect(res).Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
|
@ -227,21 +205,21 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
Context("Disabling Secrets-Encryption", func() {
|
||||
It("Disables encryption", func() {
|
||||
cmd := "k3s secrets-encrypt disable"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt reencrypt -f --skip"
|
||||
res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err = tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt status"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode(cmd)
|
||||
}, "240s", "10s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
|
||||
for i, nodeName := range serverNodeNames {
|
||||
for i, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), res)
|
||||
if i == 0 {
|
||||
g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled"))
|
||||
|
@ -253,14 +231,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed())
|
||||
})
|
||||
|
||||
It("Verifies encryption disabled on all nodes", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled"))
|
||||
g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Disabled"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
}
|
||||
})
|
||||
|
@ -270,28 +248,28 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
|
|||
Context("Enabling Secrets-Encryption", func() {
|
||||
It("Enables encryption", func() {
|
||||
cmd := "k3s secrets-encrypt enable"
|
||||
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt reencrypt -f --skip"
|
||||
res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
res, err = tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), res)
|
||||
|
||||
cmd = "k3s secrets-encrypt status"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode(cmd)
|
||||
}, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished"))
|
||||
})
|
||||
|
||||
It("Restarts K3s servers", func() {
|
||||
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(tc.Servers)).To(Succeed())
|
||||
})
|
||||
|
||||
It("Verifies encryption enabled on all nodes", func() {
|
||||
cmd := "k3s secrets-encrypt status"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Enabled"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
}
|
||||
|
||||
|
@ -307,10 +285,10 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if !failed {
|
||||
Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(tc.Servers)).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -30,28 +31,28 @@ var hardened = flag.Bool("hardened", false, "true or false")
|
|||
|
||||
// createSplitCluster creates a split server cluster with the given nodeOS, etcdCount, controlPlaneCount, and agentCount.
|
||||
// It duplicates and merges functionality found in the e2e.CreateCluster and e2e.CreateLocalCluster functions.
|
||||
func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount int, local bool) ([]string, []string, []string, error) {
|
||||
etcdNodeNames := make([]string, etcdCount)
|
||||
cpNodeNames := make([]string, controlPlaneCount)
|
||||
agentNodeNames := make([]string, agentCount)
|
||||
func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount int, local bool) ([]e2e.VagrantNode, []e2e.VagrantNode, []e2e.VagrantNode, error) {
|
||||
etcdNodes := make([]e2e.VagrantNode, etcdCount)
|
||||
cpNodes := make([]e2e.VagrantNode, controlPlaneCount)
|
||||
agentNodes := make([]e2e.VagrantNode, agentCount)
|
||||
|
||||
for i := 0; i < etcdCount; i++ {
|
||||
etcdNodeNames[i] = "server-etcd-" + strconv.Itoa(i)
|
||||
etcdNodes[i] = e2e.VagrantNode("server-etcd-" + strconv.Itoa(i))
|
||||
}
|
||||
for i := 0; i < controlPlaneCount; i++ {
|
||||
cpNodeNames[i] = "server-cp-" + strconv.Itoa(i)
|
||||
cpNodes[i] = e2e.VagrantNode("server-cp-" + strconv.Itoa(i))
|
||||
}
|
||||
for i := 0; i < agentCount; i++ {
|
||||
agentNodeNames[i] = "agent-" + strconv.Itoa(i)
|
||||
agentNodes[i] = e2e.VagrantNode("agent-" + strconv.Itoa(i))
|
||||
}
|
||||
nodeRoles := strings.Join(etcdNodeNames, " ") + " " + strings.Join(cpNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")
|
||||
nodeRoles := strings.Join(e2e.VagrantSlice(etcdNodes), " ") + " " + strings.Join(e2e.VagrantSlice(cpNodes), " ") + " " + strings.Join(e2e.VagrantSlice(agentNodes), " ")
|
||||
|
||||
nodeRoles = strings.TrimSpace(nodeRoles)
|
||||
nodeBoxes := strings.Repeat(nodeOS+" ", etcdCount+controlPlaneCount+agentCount)
|
||||
nodeBoxes = strings.TrimSpace(nodeBoxes)
|
||||
|
||||
allNodes := append(etcdNodeNames, cpNodeNames...)
|
||||
allNodes = append(allNodes, agentNodeNames...)
|
||||
allNodeNames := append(e2e.VagrantSlice(etcdNodes), e2e.VagrantSlice(cpNodes)...)
|
||||
allNodeNames = append(allNodeNames, e2e.VagrantSlice(agentNodes)...)
|
||||
|
||||
var testOptions string
|
||||
for _, env := range os.Environ() {
|
||||
|
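e2e.VagrantSlice, used above to build nodeRoles and allNodeNames, converts []VagrantNode back into the plain VM names that space-separated values such as E2E_NODE_ROLES need. A minimal version, assuming the string-backed node type sketched earlier:

package e2e

// VagrantNode as sketched earlier: a string-backed VM name.
type VagrantNode string

func (v VagrantNode) String() string { return string(v) }

// VagrantSlice returns the underlying VM names so they can be joined into
// space-separated environment values.
func VagrantSlice(nodes []VagrantNode) []string {
	names := make([]string, 0, len(nodes))
	for _, n := range nodes {
		names = append(names, n.String())
	}
	return names
}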
@ -62,15 +63,15 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount
|
|||
|
||||
// Provision the first etcd node. In GitHub Actions, this also imports the VM image into libvirt, which
|
||||
// takes time and can cause the next vagrant up to fail if it is not given enough time to complete.
|
||||
cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodeNames[0])
|
||||
cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodes[0].String())
|
||||
fmt.Println(cmd)
|
||||
if _, err := e2e.RunCommand(cmd); err != nil {
|
||||
return etcdNodeNames, cpNodeNames, agentNodeNames, err
|
||||
return etcdNodes, cpNodes, agentNodes, err
|
||||
}
|
||||
|
||||
// Bring up the rest of the nodes in parallel
|
||||
errg, _ := errgroup.WithContext(context.Background())
|
||||
for _, node := range allNodes[1:] {
|
||||
for _, node := range allNodeNames[1:] {
|
||||
cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, node)
|
||||
errg.Go(func() error {
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
|
@ -80,24 +81,25 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount
|
|||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
if err := errg.Wait(); err != nil {
|
||||
return etcdNodeNames, cpNodeNames, agentNodeNames, err
|
||||
return etcdNodes, cpNodes, agentNodes, err
|
||||
}
|
||||
|
||||
if local {
|
||||
testOptions += " E2E_RELEASE_VERSION=skip"
|
||||
for _, node := range allNodes {
|
||||
for _, node := range allNodeNames {
|
||||
cmd := fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node, node)
|
||||
if _, err := e2e.RunCommand(cmd); err != nil {
|
||||
return etcdNodeNames, cpNodeNames, agentNodeNames, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
|
||||
return etcdNodes, cpNodes, agentNodes, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil {
|
||||
return etcdNodeNames, cpNodeNames, agentNodeNames, err
|
||||
cmd = fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant ssh %s -c "sudo mv /tmp/k3s /usr/local/bin/"`, node, node)
|
||||
if _, err := e2e.RunCommand(cmd); err != nil {
|
||||
return etcdNodes, cpNodes, agentNodes, err
|
||||
}
|
||||
}
|
||||
}
|
||||
// Install K3s on all nodes in parallel
|
||||
errg, _ = errgroup.WithContext(context.Background())
|
||||
for _, node := range allNodes {
|
||||
for _, node := range allNodeNames {
|
||||
cmd = fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, testOptions, node)
|
||||
errg.Go(func() error {
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
|
@ -107,9 +109,9 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount
|
|||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
if err := errg.Wait(); err != nil {
|
||||
return etcdNodeNames, cpNodeNames, agentNodeNames, err
|
||||
return etcdNodes, cpNodes, agentNodes, err
|
||||
}
|
||||
return etcdNodeNames, cpNodeNames, agentNodeNames, nil
|
||||
return etcdNodes, cpNodes, agentNodes, nil
|
||||
}
|
||||
|
||||
func Test_E2ESplitServer(t *testing.T) {
|
||||
|
@ -120,10 +122,10 @@ func Test_E2ESplitServer(t *testing.T) {
|
|||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
etcdNodeNames []string
|
||||
cpNodeNames []string
|
||||
agentNodeNames []string
|
||||
tc *e2e.TestConfig // We don't use the Servers and Agents from this
|
||||
etcdNodes []e2e.VagrantNode
|
||||
cpNodes []e2e.VagrantNode
|
||||
agentNodes []e2e.VagrantNode
|
||||
)
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
@ -132,72 +134,66 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
Context("Cluster :", func() {
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
etcdNodeNames, cpNodeNames, agentNodeNames, err = createSplitCluster(*nodeOS, *etcdCount, *controlPlaneCount, *agentCount, *local)
|
||||
etcdNodes, cpNodes, agentNodes, err = createSplitCluster(*nodeOS, *etcdCount, *controlPlaneCount, *agentCount, *local)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Etcd Server Nodes:", etcdNodeNames)
|
||||
fmt.Println("Control Plane Server Nodes:", cpNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(cpNodeNames[0])
|
||||
fmt.Println("Etcd Server Nodes:", etcdNodes)
|
||||
fmt.Println("Control Plane Server Nodes:", cpNodes)
|
||||
fmt.Println("Agent Nodes:", agentNodes)
|
||||
kubeConfigFile, err := e2e.GenKubeConfigFile(cpNodes[0].String())
|
||||
tc = &e2e.TestConfig{
|
||||
KubeConfigFile: kubeConfigFile,
|
||||
Hardened: *hardened,
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Verifies ClusterIP Service", func() {
|
||||
_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("clusterip.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
|
||||
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
|
||||
|
||||
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
|
||||
clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false)
|
||||
cmd = "curl -L --insecure http://" + clusterip + "/name.html"
|
||||
for _, nodeName := range cpNodeNames {
|
||||
for _, node := range cpNodes {
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, nodeName)
|
||||
return node.RunCmdOnNode(cmd)
|
||||
}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
|
||||
}
|
||||
})
|
||||
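e2e.FetchClusterIP above resolves a service's ClusterIP. A sketch of what such a lookup could look like using kubectl and the existing e2e.RunCommand helper; the signature matches the call sites in this diff, but the real implementation (including how the ipv6 flag is handled) may differ.

package e2e

import "fmt"

// FetchClusterIP looks up a service's ClusterIP with kubectl. Sketch only;
// the ipv6 flag is assumed to select the secondary family on dual-stack.
func FetchClusterIP(kubeconfigFile, serviceName string, ipv6 bool) (string, error) {
	jsonpath := "{.spec.clusterIP}"
	if ipv6 {
		jsonpath = "{.spec.clusterIPs[1]}"
	}
	cmd := fmt.Sprintf("kubectl get svc %s -o jsonpath='%s' --kubeconfig=%s",
		serviceName, jsonpath, kubeconfigFile)
	return RunCommand(cmd) // RunCommand is the existing e2e shell helper used throughout this diff
}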
|
||||
It("Verifies NodePort Service", func() {
|
||||
_, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("nodeport.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
|
||||
|
||||
for _, nodeName := range cpNodeNames {
|
||||
nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
for _, node := range cpNodes {
|
||||
nodeExternalIP, _ := node.FetchNodeExternalIP()
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
nodeport, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
|
||||
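node.FetchNodeExternalIP() replaces the old e2e.FetchNodeExternalIP(nodeName). A hedged sketch building on the VagrantNode.RunCmdOnNode sketch above; reading eth1 on the VM is an assumption about typical Vagrant private-network wiring, not necessarily this repo's actual method.

package e2e

import "strings"

// FetchNodeExternalIP returns the address the node is reachable on from the
// host. Sketch only: it reads the first IPv4 address on eth1 via the
// RunCmdOnNode method sketched earlier.
func (v VagrantNode) FetchNodeExternalIP() (string, error) {
	cmd := "ip -4 -o addr show dev eth1 | awk '{print $4}' | cut -d/ -f1"
	out, err := v.RunCmdOnNode(cmd)
	return strings.TrimSpace(out), err
}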
|
@ -210,17 +206,17 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies LoadBalancer Service", func() {
|
||||
_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("loadbalancer.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
|
||||
|
||||
for _, nodeName := range cpNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
for _, node := range cpNodes {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
port, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
|
||||
|
@ -233,11 +229,11 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Ingress", func() {
|
||||
_, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("ingress.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
|
||||
|
||||
for _, nodeName := range cpNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
for _, node := range cpNodes {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
|
@ -246,30 +242,26 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Daemonset", func() {
|
||||
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("daemonset.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, _ := e2e.ParsePods(kubeConfigFile, false)
|
||||
count := e2e.CountOfStringInSlice("test-daemonset", pods)
|
||||
fmt.Println("POD COUNT")
|
||||
fmt.Println(count)
|
||||
fmt.Println("CP COUNT")
|
||||
fmt.Println(len(cpNodeNames))
|
||||
g.Expect(len(cpNodeNames)).Should((Equal(count)), "Daemonset pod count does not match cp node count")
|
||||
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(cpNodes).To(HaveLen(count), "Daemonset pod count does not match cp node count")
|
||||
}, "240s", "10s").Should(Succeed())
|
||||
})
|
||||
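In line with the commit's "directly count daemonsets, not their pods", the test above compares e2e.GetDaemonsetReady against the control-plane node count instead of grepping pod names. A sketch of the assumed helper, reading the daemonset's own status through kubectl; the real one may use client-go instead.

package e2e

import (
	"fmt"
	"strconv"
	"strings"
)

// GetDaemonsetReady returns the number of ready pods reported by the
// daemonset status itself. Sketch only, not the repo's implementation.
func GetDaemonsetReady(name, kubeconfigFile string) (int, error) {
	cmd := fmt.Sprintf(
		"kubectl get daemonset %s -o jsonpath='{.status.numberReady}' --kubeconfig=%s",
		name, kubeconfigFile)
	out, err := RunCommand(cmd) // existing e2e shell helper
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(out))
}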
|
||||
It("Verifies dns access", func() {
|
||||
_, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("dnsutils.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
|
||||
|
||||
cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "420s", "2s").Should(ContainSubstring("dnsutils"), "failed cmd: "+cmd)
|
||||
|
||||
cmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
cmd = "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"), "failed cmd: "+cmd)
|
||||
|
@ -283,8 +275,8 @@ var _ = AfterEach(func() {
|
|||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
allNodes := append(cpNodeNames, etcdNodeNames...)
|
||||
allNodes = append(allNodes, agentNodeNames...)
|
||||
allNodes := append(cpNodes, etcdNodes...)
|
||||
allNodes = append(allNodes, agentNodes...)
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, allNodes))
|
||||
} else {
|
||||
|
@ -292,6 +284,6 @@ var _ = AfterSuite(func() {
|
|||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -32,19 +33,15 @@ func Test_E2EStartupValidation(t *testing.T) {
|
|||
RunSpecs(t, "Startup Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error {
|
||||
func StartK3sCluster(nodes []e2e.VagrantNode, serverYAML string, agentYAML string) error {
|
||||
|
||||
for _, node := range nodes {
|
||||
var yamlCmd string
|
||||
var resetCmd string
|
||||
var startCmd string
|
||||
if strings.Contains(node, "server") {
|
||||
if strings.Contains(node.String(), "server") {
|
||||
resetCmd = "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
|
||||
yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML)
|
||||
startCmd = "systemctl start k3s"
|
||||
|
@ -53,32 +50,32 @@ func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error
|
|||
yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML)
|
||||
startCmd = "systemctl start k3s-agent"
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil {
|
||||
if _, err := node.RunCmdOnNode(resetCmd); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode(yamlCmd, node); err != nil {
|
||||
if _, err := node.RunCmdOnNode(yamlCmd); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode(startCmd, node); err != nil {
|
||||
if _, err := node.RunCmdOnNode(startCmd); err != nil {
|
||||
return &e2e.NodeError{Node: node, Cmd: startCmd, Err: err}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func KillK3sCluster(nodes []string) error {
|
||||
func KillK3sCluster(nodes []e2e.VagrantNode) error {
|
||||
for _, node := range nodes {
|
||||
if _, err := e2e.RunCmdOnNode("k3s-killall.sh", node); err != nil {
|
||||
if _, err := node.RunCmdOnNode("k3s-killall.sh"); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode("journalctl --flush --sync --rotate --vacuum-size=1", node); err != nil {
|
||||
if _, err := node.RunCmdOnNode("journalctl --flush --sync --rotate --vacuum-size=1"); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := e2e.RunCmdOnNode("rm -rf /etc/rancher/k3s/config.yaml.d", node); err != nil {
|
||||
if _, err := node.RunCmdOnNode("rm -rf /etc/rancher/k3s/config.yaml.d"); err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.Contains(node, "server") {
|
||||
if _, err := e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", node); err != nil {
|
||||
if strings.Contains(node.String(), "server") {
|
||||
if _, err := node.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -91,9 +88,9 @@ var _ = ReportAfterEach(e2e.GenReport)
|
|||
var _ = BeforeSuite(func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, 1, 1)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, 1, 1)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
})
|
||||
|
@ -101,48 +98,36 @@ var _ = BeforeSuite(func() {
|
|||
var _ = Describe("Various Startup Configurations", Ordered, func() {
|
||||
Context("Verify dedicated supervisor port", func() {
|
||||
It("Starts K3s with no issues", func() {
|
||||
for _, node := range agentNodeNames {
|
||||
for _, node := range tc.Agents {
|
||||
cmd := "mkdir -p /etc/rancher/k3s/config.yaml.d; grep -F server: /etc/rancher/k3s/config.yaml | sed s/6443/9345/ > /tmp/99-server.yaml; sudo mv /tmp/99-server.yaml /etc/rancher/k3s/config.yaml.d/"
|
||||
res, err := e2e.RunCmdOnNode(cmd, node)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
By("checking command results: " + res)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
supervisorPortYAML := "supervisor-port: 9345\napiserver-port: 6443\napiserver-bind-address: 0.0.0.0\ndisable: traefik\nnode-taint: node-role.kubernetes.io/control-plane:NoExecute"
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), supervisorPortYAML, "")
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), supervisorPortYAML, "")
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS:" + *nodeOS)
|
||||
By(tc.Status())
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Returns pod metrics", func() {
|
||||
|
@ -161,7 +146,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
|
||||
It("Runs an interactive command a pod", func() {
|
||||
cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
|
@ -172,136 +157,150 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
Context("Verify CRI-Dockerd :", func() {
|
||||
Context("Verify kubelet config file", func() {
|
||||
It("Starts K3s with no issues", func() {
|
||||
dockerYAML := "docker: true"
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), dockerYAML, dockerYAML)
|
||||
for _, node := range append(tc.Servers, tc.Agents...) {
|
||||
cmd := "mkdir -p --mode=0777 /tmp/kubelet.conf.d; echo 'apiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\nshutdownGracePeriod: 19s\nshutdownGracePeriodCriticalPods: 13s' > /tmp/kubelet.conf.d/99-shutdownGracePeriod.conf"
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
By("checking command results: " + res)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
kubeletConfigDirYAML := "kubelet-arg: config-dir=/tmp/kubelet.conf.d"
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), kubeletConfigDirYAML, kubeletConfigDirYAML)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS:" + *nodeOS)
|
||||
By(tc.Status())
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching node status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Returns kubelet configuration", func() {
|
||||
for _, node := range append(tc.Servers, tc.Agents...) {
|
||||
cmd := "kubectl get --raw /api/v1/nodes/" + node.String() + "/proxy/configz"
|
||||
Expect(e2e.RunCommand(cmd)).To(ContainSubstring(`"shutdownGracePeriod":"19s","shutdownGracePeriodCriticalPods":"13s"`))
|
||||
}
|
||||
})
|
||||
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
Context("Verify CRI-Dockerd", func() {
|
||||
It("Starts K3s with no issues", func() {
|
||||
dockerYAML := "docker: true"
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), dockerYAML, dockerYAML)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS:" + *nodeOS)
|
||||
By(tc.Status())
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
Context("Verify prefer-bundled-bin flag", func() {
|
||||
It("Starts K3s with no issues", func() {
|
||||
preferBundledYAML := "prefer-bundled-bin: true"
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), preferBundledYAML, preferBundledYAML)
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), preferBundledYAML, preferBundledYAML)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS:" + *nodeOS)
|
||||
By(tc.Status())
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
Context("Verify disable-agent and egress-selector-mode flags", func() {
|
||||
It("Starts K3s with no issues", func() {
|
||||
disableAgentYAML := "disable-agent: true\negress-selector-mode: cluster"
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), disableAgentYAML, "")
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), disableAgentYAML, "")
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS:" + *nodeOS)
|
||||
By(tc.Status())
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Returns pod metrics", func() {
|
||||
|
@ -320,7 +319,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
|
||||
It("Runs an interactive command a pod", func() {
|
||||
cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a"
|
||||
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
|
@ -331,57 +330,56 @@ var _ = Describe("Various Startup Configurations", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
Context("Verify server picks up preloaded images on start", func() {
|
||||
It("Downloads and preloads images", func() {
|
||||
_, err := e2e.RunCmdOnNode("docker pull ranchertest/mytestcontainer:latest", serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("docker pull ranchertest/mytestcontainer:latest")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = e2e.RunCmdOnNode("docker save ranchertest/mytestcontainer:latest -o /tmp/mytestcontainer.tar", serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode("docker save ranchertest/mytestcontainer:latest -o /tmp/mytestcontainer.tar")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = e2e.RunCmdOnNode("mkdir -p /var/lib/rancher/k3s/agent/images/", serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode("mkdir -p /var/lib/rancher/k3s/agent/images/")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = e2e.RunCmdOnNode("mv /tmp/mytestcontainer.tar /var/lib/rancher/k3s/agent/images/", serverNodeNames[0])
|
||||
_, err = tc.Servers[0].RunCmdOnNode("mv /tmp/mytestcontainer.tar /var/lib/rancher/k3s/agent/images/")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
It("Starts K3s with no issues", func() {
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), "", "")
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), "", "")
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS:" + *nodeOS)
|
||||
By(tc.Status())
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
It("has loaded the test container image", func() {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "k3s crictl images | grep ranchertest/mytestcontainer"
|
||||
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode(cmd)
|
||||
}, "120s", "5s").Should(ContainSubstring("ranchertest/mytestcontainer"))
|
||||
})
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
Context("Verify server fails to start with bootstrap token", func() {
|
||||
It("Fails to start with a meaningful error", func() {
|
||||
tokenYAML := "token: aaaaaa.bbbbbbbbbbbbbbbb"
|
||||
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), tokenYAML, tokenYAML)
|
||||
err := StartK3sCluster(append(tc.Servers, tc.Agents...), tokenYAML, tokenYAML)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Eventually(func(g Gomega) {
|
||||
logs, err := e2e.GetJournalLogs(serverNodeNames[0])
|
||||
logs, err := tc.Servers[0].GetJournalLogs()
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(logs).To(ContainSubstring("failed to normalize server token"))
|
||||
}, "120s", "5s").Should(Succeed())
|
||||
|
||||
})
|
||||
It("Kills the cluster", func() {
|
||||
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
|
||||
err := KillK3sCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
@ -394,13 +392,13 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("config", e2e.GetConfig(append(serverNodeNames, agentNodeNames...)))
|
||||
Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
AddReportEntry("config", e2e.GetConfig(append(tc.Servers, tc.Agents...)))
|
||||
Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"testing"
|
||||
"text/template"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
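In the hunk below, tests.ParsePods (returning []corev1.Pod, per the new tests package) replaces the old e2e.ParsePods with its custom pod struct, so the node name is read from Spec.NodeName and the phase from Status rather than from pod.Node and pod.Status strings. A short illustrative usage, with a hypothetical kubeconfig path:

package example

import (
	"fmt"
	"strings"

	"github.com/k3s-io/k3s/tests"
)

// findLoadBalancerNode shows how the corev1-based helper is consumed: the
// node a pod landed on comes from Spec.NodeName. The kubeconfig path is
// illustrative only.
func findLoadBalancerNode() (string, error) {
	pods, err := tests.ParsePods("/tmp/kubeconfig")
	if err != nil {
		return "", err
	}
	for _, pod := range pods {
		if strings.Contains(pod.Name, "test-loadbalancer-ext") {
			return pod.Spec.NodeName, nil
		}
	}
	return "", fmt.Errorf("server pod not found")
}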
@ -33,311 +34,312 @@ func Test_E2EPoliciesAndFirewall(t *testing.T) {
|
|||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
nodes []e2e.Node
|
||||
tc *e2e.TestConfig
|
||||
nodes []e2e.Node
|
||||
)
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
var _ = Describe("Verify Services Traffic policies and firewall config", Ordered, func() {
|
||||
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks Node Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
Context("Start cluster with minimal configuration", func() {
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
nodes, err = e2e.ParseNodes(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
if *local {
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
}, "300s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParseNodes(kubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
})
|
||||
|
||||
It("Checks Pod Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
It("Checks Node Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
var err error
|
||||
nodes, err = e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}
|
||||
}, "300s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParsePods(kubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Verifies that the service with external traffic policy=local is deployed
|
||||
// Verifies that the external-ip is only set to the node IP where the server runs
|
||||
// It also verifies that the service with external traffic policy=cluster has both node IPs as externalIP
|
||||
It("Verify external traffic policy=local gets set up correctly", func() {
|
||||
_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "loadbalancer not deployed")
|
||||
_, err = e2e.DeployWorkload("loadbalancer-extTrafficPol.yaml", kubeConfigFile, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "loadbalancer-extTrafficPol not deployed")
|
||||
|
||||
// Check where the server pod is running
|
||||
var serverNodeName string
|
||||
Eventually(func() (string, error) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to parse pods")
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "test-loadbalancer-ext") {
|
||||
serverNodeName = pod.Node
|
||||
break
|
||||
}
|
||||
}
|
||||
return serverNodeName, nil
|
||||
}, "25s", "5s").ShouldNot(BeEmpty(), "server pod not found")
|
||||
|
||||
var serverNodeIP string
|
||||
for _, node := range nodes {
|
||||
if node.Name == serverNodeName {
|
||||
serverNodeIP = node.InternalIP
|
||||
}
|
||||
}
|
||||
|
||||
// Verify there is only one external-ip and it is matching the node IP
|
||||
lbSvc := "nginx-loadbalancer-svc"
|
||||
lbSvcExt := "nginx-loadbalancer-svc-ext"
|
||||
Eventually(func() ([]string, error) {
|
||||
return e2e.FetchExternalIPs(kubeConfigFile, lbSvc)
|
||||
}, "25s", "5s").Should(HaveLen(2), "external IP count not equal to 2")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
externalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvcExt)
|
||||
g.Expect(externalIPs).To(HaveLen(1), "more than 1 externalIP found")
|
||||
g.Expect(externalIPs[0]).To(Equal(serverNodeIP), "external IP does not match serverNodeIP")
|
||||
}, "25s", "5s").Should(Succeed())
|
||||
})
|
||||
|
||||
// Verifies that the service is reachable from the outside and the source IP is not MASQed
|
||||
// It also verifies that the service with external traffic policy=cluster can be accessed and the source IP is MASQed
|
||||
It("Verify connectivity in external traffic policy=local", func() {
|
||||
lbSvc := "nginx-loadbalancer-svc"
|
||||
lbSvcExternalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvc)
|
||||
lbSvcExt := "nginx-loadbalancer-svc-ext"
|
||||
lbSvcExtExternalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvcExt)
|
||||
|
||||
// Verify connectivity to the external IP of the lbsvc service and the IP should be the flannel interface IP because of MASQ
|
||||
for _, externalIP := range lbSvcExternalIPs {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "curl -s " + externalIP + ":81/ip"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "25s", "5s").Should(ContainSubstring("10.42"))
|
||||
}
|
||||
|
||||
// Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "curl -s " + lbSvcExtExternalIPs[0] + ":82/ip"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "25s", "5s").ShouldNot(ContainSubstring("10.42"))
|
||||
|
||||
// Verify connectivity to the other nodeIP does not work because of external traffic policy=local
|
||||
for _, externalIP := range lbSvcExternalIPs {
|
||||
if externalIP == lbSvcExtExternalIPs[0] {
|
||||
// We already tested this IP and it should work
|
||||
continue
|
||||
}
|
||||
Eventually(func() error {
|
||||
cmd := "curl -s --max-time 5 " + externalIP + ":82/ip"
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
return err
|
||||
}, "40s", "5s").Should(MatchError(ContainSubstring("exit status")))
|
||||
}
|
||||
})
|
||||
|
||||
// Verifies that the internal traffic policy=local is deployed
|
||||
It("Verify internal traffic policy=local gets set up correctly", func() {
|
||||
_, err := e2e.DeployWorkload("loadbalancer-intTrafficPol.yaml", kubeConfigFile, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "loadbalancer-intTrafficPol not deployed")
|
||||
_, err = e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "pod client not deployed")
|
||||
|
||||
// Check that service exists
|
||||
Eventually(func() (string, error) {
|
||||
clusterIP, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-loadbalancer-svc-int", false)
|
||||
return clusterIP, nil
|
||||
}, "25s", "5s").Should(ContainSubstring("10.43"))
|
||||
|
||||
// Check that client pods are running
|
||||
Eventually(func() string {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
}, "300s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "client-deployment") {
|
||||
return pod.Status
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}, "50s", "5s").Should(Equal("Running"))
|
||||
})
|
||||
})
|
||||
|
||||
// Verifies that only the client pod running in the same node as the server pod can access the service
|
||||
// It also verifies that the service with internal traffic policy=cluster can be accessed by both client pods
|
||||
It("Verify connectivity in internal traffic policy=local", func() {
|
||||
var clientPod1, clientPod1Node, clientPod1IP, clientPod2, clientPod2Node, clientPod2IP, serverNodeName string
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to parse pods")
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "test-loadbalancer-int") {
|
||||
serverNodeName = pod.Node
|
||||
}
|
||||
if strings.Contains(pod.Name, "client-deployment") {
|
||||
if clientPod1 == "" {
|
||||
clientPod1 = pod.Name
|
||||
clientPod1Node = pod.Node
|
||||
clientPod1IP = pod.IP
|
||||
} else {
|
||||
clientPod2 = pod.Name
|
||||
clientPod2Node = pod.Node
|
||||
clientPod2IP = pod.IP
|
||||
It("Checks Pod Status", func() {
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "300s", "5s").Should(Succeed())
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
})
|
||||
Context("Deploy external traffic workloads to test external traffic policies", func() {
|
||||
// Verifies that the service with external traffic policy=local is deployed
|
||||
// Verifies that the external-ip is only set to the node IP where the server runs
|
||||
// It also verifies that the service with external traffic policy=cluster has both node IPs as externalIP
|
||||
It("Verify external traffic policy=local gets set up correctly", func() {
|
||||
_, err := tc.DeployWorkload("loadbalancer.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "loadbalancer not deployed")
|
||||
_, err = tc.DeployWorkload("loadbalancer-extTrafficPol.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "loadbalancer-extTrafficPol not deployed")
|
||||
|
||||
// Check where the server pod is running
|
||||
var serverNodeName string
|
||||
Eventually(func() (string, error) {
|
||||
pods, err := tests.ParsePods(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to parse pods")
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "test-loadbalancer-ext") {
|
||||
serverNodeName = pod.Spec.NodeName
|
||||
break
|
||||
}
|
||||
}
|
||||
return serverNodeName, nil
|
||||
}, "25s", "5s").ShouldNot(BeEmpty(), "server pod not found")
|
||||
|
||||
var serverNodeIP string
|
||||
for _, node := range nodes {
|
||||
if node.Name == serverNodeName {
|
||||
serverNodeIP = node.InternalIP
|
||||
}
|
||||
}
|
||||
// As we need those variables for the connectivity test, let's check they are not empty
|
||||
g.Expect(serverNodeName).ShouldNot(BeEmpty(), "server pod for internalTrafficPolicy=local not found")
|
||||
g.Expect(clientPod1).ShouldNot(BeEmpty(), "client pod1 not found")
|
||||
g.Expect(clientPod2).ShouldNot(BeEmpty(), "client pod2 not found")
|
||||
g.Expect(clientPod1Node).ShouldNot(BeEmpty(), "client pod1 node not found")
|
||||
g.Expect(clientPod2Node).ShouldNot(BeEmpty(), "client pod2 node not found")
|
||||
g.Expect(clientPod1IP).ShouldNot(BeEmpty(), "client pod1 IP not found")
|
||||
g.Expect(clientPod2IP).ShouldNot(BeEmpty(), "client pod2 IP not found")
|
||||
}, "25s", "5s").Should(Succeed(), "All pod and names and IPs should be non-empty")
|
||||
|
||||
// Check that clientPod1Node and clientPod2Node are not equal
|
||||
Expect(clientPod1Node).ShouldNot(Equal(clientPod2Node))
|
||||
// Verify there is only one external-ip and it is matching the node IP
|
||||
lbSvc := "nginx-loadbalancer-svc"
|
||||
lbSvcExt := "nginx-loadbalancer-svc-ext"
|
||||
Eventually(func() ([]string, error) {
|
||||
return e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvc)
|
||||
}, "25s", "5s").Should(HaveLen(2), "external IP count not equal to 2")
|
||||
|
||||
var workingCmd, nonWorkingCmd string
|
||||
if serverNodeName == clientPod1Node {
|
||||
workingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
nonWorkingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
}
|
||||
if serverNodeName == clientPod2Node {
|
||||
workingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
nonWorkingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
}
|
||||
Eventually(func(g Gomega) {
|
||||
externalIPs, _ := e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvcExt)
|
||||
g.Expect(externalIPs).To(HaveLen(1), "more than 1 externalIP found")
g.Expect(externalIPs[0]).To(Equal(serverNodeIP), "external IP does not match serverNodeIP")
|
||||
}, "25s", "5s").Should(Succeed())
|
||||
})
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
out, err := e2e.RunCommand(workingCmd)
|
||||
return out, err
|
||||
}, "25s", "5s").Should(SatisfyAny(
|
||||
ContainSubstring(clientPod1IP),
|
||||
ContainSubstring(clientPod2IP),
|
||||
))
|
||||
// Verifies that the service is reachable from the outside and the source IP is not MASQed
// It also verifies that the service with external traffic policy=cluster can be accessed and the source IP is MASQed
|
||||
It("Verify connectivity in external traffic policy=local", func() {
|
||||
lbSvc := "nginx-loadbalancer-svc"
|
||||
lbSvcExternalIPs, _ := e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvc)
|
||||
lbSvcExt := "nginx-loadbalancer-svc-ext"
|
||||
lbSvcExtExternalIPs, _ := e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvcExt)
|
||||
|
||||
// Check the non working command fails because of internal traffic policy=local
|
||||
Eventually(func() bool {
|
||||
_, err := e2e.RunCommand(nonWorkingCmd)
|
||||
if err != nil && strings.Contains(err.Error(), "exit status") {
|
||||
// Treat exit status as a successful condition
|
||||
return true
|
||||
// Verify connectivity to the external IP of the lbsvc service and the IP should be the flannel interface IP because of MASQ
|
||||
for _, externalIP := range lbSvcExternalIPs {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "curl -s " + externalIP + ":81/ip"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "25s", "5s").Should(ContainSubstring("10.42"))
|
||||
}
|
||||
return false
|
||||
}, "40s", "5s").Should(BeTrue())
|
||||
|
||||
// curling a service with internal traffic policy=cluster. It should work on both pods
|
||||
for _, pod := range []string{clientPod1, clientPod2} {
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec " + pod + " -- curl -s --max-time 5 nginx-loadbalancer-svc:81/ip"
|
||||
// Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "curl -s " + lbSvcExtExternalIPs[0] + ":82/ip"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "20s", "5s").Should(SatisfyAny(
|
||||
}, "25s", "5s").ShouldNot(ContainSubstring("10.42"))
|
||||
|
||||
// Verify connectivity to the other nodeIP does not work because of external traffic policy=local
|
||||
for _, externalIP := range lbSvcExternalIPs {
|
||||
if externalIP == lbSvcExtExternalIPs[0] {
|
||||
// We already tested this IP above and it should work
|
||||
continue
|
||||
}
|
||||
Eventually(func() error {
|
||||
cmd := "curl -s --max-time 5 " + externalIP + ":82/ip"
|
||||
_, err := e2e.RunCommand(cmd)
|
||||
return err
|
||||
}, "40s", "5s").Should(MatchError(ContainSubstring("exit status")))
|
||||
}
|
||||
})
|
||||
|
||||
// Verifies that the internal traffic policy=local is deployed
|
||||
It("Verify internal traffic policy=local gets set up correctly", func() {
|
||||
_, err := tc.DeployWorkload("loadbalancer-intTrafficPol.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "loadbalancer-intTrafficPol not deployed")
|
||||
_, err = tc.DeployWorkload("pod_client.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "pod client not deployed")
|
||||
|
||||
// Check that service exists
|
||||
Eventually(func() (string, error) {
|
||||
clusterIP, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-loadbalancer-svc-int", false)
|
||||
return clusterIP, nil
|
||||
}, "25s", "5s").Should(ContainSubstring("10.43"))
|
||||
|
||||
// Check that client pods are running
|
||||
Eventually(func() string {
|
||||
pods, err := tests.ParsePods(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "client-deployment") {
|
||||
return string(pod.Status.Phase)
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}, "50s", "5s").Should(Equal("Running"))
|
||||
})
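The loadbalancer-intTrafficPol.yaml manifest itself is not part of this diff; the assertions only rely on it creating a LoadBalancer service named nginx-loadbalancer-svc-int listening on port 83 with internalTrafficPolicy: Local. A rough sketch of such a manifest, embedded as a Go raw string the same way the loadBalancerSourceRanges manifest is later in this file (the selector and target port are assumptions, not the repository file):

// Assumed shape of the internal-traffic-policy service; the real resource file may differ.
const intTrafficPolSvcSketch = `
apiVersion: v1
kind: Service
metadata:
  name: nginx-loadbalancer-svc-int
spec:
  type: LoadBalancer
  internalTrafficPolicy: Local
  ports:
  - port: 83
    targetPort: 80
    protocol: TCP
    name: http
  selector:
    k8s-app: nginx-app-loadbalancer-int
`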
// Verifies that only the client pod running in the same node as the server pod can access the service
|
||||
// It also verifies that the service with internal traffic policy=cluster can be accessed by both client pods
|
||||
It("Verify connectivity in internal traffic policy=local", func() {
|
||||
var clientPod1, clientPod1Node, clientPod1IP, clientPod2, clientPod2Node, clientPod2IP, serverNodeName string
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := tests.ParsePods(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to parse pods")
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "test-loadbalancer-int") {
|
||||
serverNodeName = pod.Spec.NodeName
|
||||
}
|
||||
if strings.Contains(pod.Name, "client-deployment") {
|
||||
if clientPod1 == "" {
|
||||
clientPod1 = pod.Name
|
||||
clientPod1Node = pod.Spec.NodeName
|
||||
clientPod1IP = pod.Status.PodIP
|
||||
} else {
|
||||
clientPod2 = pod.Name
|
||||
clientPod2Node = pod.Spec.NodeName
|
||||
clientPod2IP = pod.Status.PodIP
|
||||
}
|
||||
}
|
||||
}
|
||||
// As we need those variables for the connectivity test, let's check they are not empty
|
||||
g.Expect(serverNodeName).ShouldNot(BeEmpty(), "server pod for internalTrafficPolicy=local not found")
|
||||
g.Expect(clientPod1).ShouldNot(BeEmpty(), "client pod1 not found")
|
||||
g.Expect(clientPod2).ShouldNot(BeEmpty(), "client pod2 not found")
|
||||
g.Expect(clientPod1Node).ShouldNot(BeEmpty(), "client pod1 node not found")
|
||||
g.Expect(clientPod2Node).ShouldNot(BeEmpty(), "client pod2 node not found")
|
||||
g.Expect(clientPod1IP).ShouldNot(BeEmpty(), "client pod1 IP not found")
|
||||
g.Expect(clientPod2IP).ShouldNot(BeEmpty(), "client pod2 IP not found")
|
||||
}, "25s", "5s").Should(Succeed(), "All pod and names and IPs should be non-empty")
|
||||
|
||||
// Check that clientPod1Node and clientPod2Node are not equal
|
||||
Expect(clientPod1Node).ShouldNot(Equal(clientPod2Node))
|
||||
|
||||
var workingCmd, nonWorkingCmd string
|
||||
if serverNodeName == clientPod1Node {
|
||||
workingCmd = "kubectl exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
nonWorkingCmd = "kubectl exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
}
|
||||
if serverNodeName == clientPod2Node {
|
||||
workingCmd = "kubectl exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
nonWorkingCmd = "kubectl exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip"
|
||||
}
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
out, err := e2e.RunCommand(workingCmd)
|
||||
return out, err
|
||||
}, "25s", "5s").Should(SatisfyAny(
|
||||
ContainSubstring(clientPod1IP),
|
||||
ContainSubstring(clientPod2IP),
|
||||
))
|
||||
}
|
||||
})
|
||||
|
||||
// Set up the service manifest with loadBalancerSourceRanges
|
||||
It("Applies service manifest with loadBalancerSourceRanges", func() {
|
||||
// Define the service manifest with a placeholder for the IP
|
||||
serviceManifest := `
|
||||
// Check the non working command fails because of internal traffic policy=local
|
||||
Eventually(func() bool {
|
||||
_, err := e2e.RunCommand(nonWorkingCmd)
|
||||
if err != nil && strings.Contains(err.Error(), "exit status") {
|
||||
// Treat exit status as a successful condition
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}, "40s", "5s").Should(BeTrue())
|
||||
|
||||
// curling a service with internal traffic policy=cluster. It should work on both pods
|
||||
for _, pod := range []string{clientPod1, clientPod2} {
|
||||
cmd := "kubectl exec " + pod + " -- curl -s --max-time 5 nginx-loadbalancer-svc:81/ip"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "20s", "5s").Should(SatisfyAny(
|
||||
ContainSubstring(clientPod1IP),
|
||||
ContainSubstring(clientPod2IP),
|
||||
))
|
||||
}
|
||||
})
|
||||
|
||||
// Set up the service manifest with loadBalancerSourceRanges
|
||||
It("Applies service manifest with loadBalancerSourceRanges", func() {
|
||||
// Define the service manifest with a placeholder for the IP
|
||||
serviceManifest := `
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx-loadbalancer-svc-ext-firewall
|
||||
name: nginx-loadbalancer-svc-ext-firewall
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
loadBalancerSourceRanges:
|
||||
- {{.NodeIP}}/32
|
||||
ports:
|
||||
- port: 82
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
k8s-app: nginx-app-loadbalancer-ext
|
||||
type: LoadBalancer
|
||||
loadBalancerSourceRanges:
|
||||
- {{.NodeIP}}/32
|
||||
ports:
|
||||
- port: 82
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
k8s-app: nginx-app-loadbalancer-ext
|
||||
`
|
||||
// Remove the service nginx-loadbalancer-svc-ext
|
||||
_, err := e2e.RunCommand("kubectl --kubeconfig=" + kubeConfigFile + " delete svc nginx-loadbalancer-svc-ext")
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext")
|
||||
// Remove the service nginx-loadbalancer-svc-ext
|
||||
_, err := e2e.RunCommand("kubectl delete svc nginx-loadbalancer-svc-ext")
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext")
|
||||
|
||||
// Parse and execute the template with the node IP
|
||||
tmpl, err := template.New("service").Parse(serviceManifest)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Parse and execute the template with the node IP
|
||||
tmpl, err := template.New("service").Parse(serviceManifest)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
var filledManifest strings.Builder
|
||||
err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodes[0].InternalIP})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
var filledManifest strings.Builder
|
||||
err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodes[0].InternalIP})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Write the filled manifest to a temporary file
|
||||
tmpFile, err := os.CreateTemp("", "service-*.yaml")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer os.Remove(tmpFile.Name())
|
||||
// Write the filled manifest to a temporary file
|
||||
tmpFile, err := os.CreateTemp("", "service-*.yaml")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer os.Remove(tmpFile.Name())
|
||||
|
||||
_, err = tmpFile.WriteString(filledManifest.String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
tmpFile.Close()
|
||||
_, err = tmpFile.WriteString(filledManifest.String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
tmpFile.Close()
|
||||
|
||||
// Apply the manifest using kubectl
|
||||
applyCmd := fmt.Sprintf("kubectl --kubeconfig=%s apply -f %s", kubeConfigFile, tmpFile.Name())
|
||||
out, err := e2e.RunCommand(applyCmd)
|
||||
Expect(err).NotTo(HaveOccurred(), out)
|
||||
// Apply the manifest using kubectl
|
||||
applyCmd := fmt.Sprintf("kubectl apply -f %s", tmpFile.Name())
|
||||
out, err := e2e.RunCommand(applyCmd)
|
||||
Expect(err).NotTo(HaveOccurred(), out)
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
clusterIP, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-loadbalancer-svc-ext-firewall", false)
|
||||
return clusterIP, nil
|
||||
}, "25s", "5s").Should(ContainSubstring("10.43"))
|
||||
})
|
||||
|
||||
// Verify that only the allowed node can curl. That node should be able to curl both externalIPs (i.e. node.InternalIP)
|
||||
It("Verify firewall is working", func() {
|
||||
for _, node := range nodes {
|
||||
// Verify connectivity from nodes[0] works because we passed its IP to the loadBalancerSourceRanges
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "curl -s --max-time 5 " + node.InternalIP + ":82"
|
||||
return e2e.RunCmdOnNode(cmd, nodes[0].Name)
|
||||
}, "40s", "5s").Should(ContainSubstring("Welcome to nginx"))
|
||||
clusterIP, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-loadbalancer-svc-ext-firewall", false)
|
||||
return clusterIP, nil
|
||||
}, "25s", "5s").Should(ContainSubstring("10.43"))
|
||||
})
|
||||
|
||||
// Verify connectivity from nodes[1] fails because we did not pass its IP to the loadBalancerSourceRanges
|
||||
Eventually(func(g Gomega) error {
|
||||
cmd := "curl -s --max-time 5 " + node.InternalIP + ":82"
|
||||
_, err := e2e.RunCmdOnNode(cmd, nodes[1].Name)
|
||||
return err
|
||||
}, "40s", "5s").Should(MatchError(ContainSubstring("exit status")))
|
||||
}
|
||||
// Verify that only the allowed node can curl. That node should be able to curl both externalIPs (i.e. node.InternalIP)
|
||||
It("Verify firewall is working", func() {
|
||||
for _, node := range nodes {
|
||||
var sNode, aNode e2e.VagrantNode
|
||||
for _, n := range tc.Servers {
|
||||
if n.String() == nodes[0].Name {
|
||||
sNode = n
|
||||
}
|
||||
}
|
||||
for _, n := range tc.Agents {
|
||||
if n.String() == nodes[1].Name {
|
||||
aNode = n
|
||||
}
|
||||
}
|
||||
|
||||
// Verify connectivity from nodes[0] works because we passed its IP to the loadBalancerSourceRanges
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "curl -s --max-time 5 " + node.InternalIP + ":82"
|
||||
return sNode.RunCmdOnNode(cmd)
|
||||
}, "40s", "5s").Should(ContainSubstring("Welcome to nginx"))
|
||||
|
||||
// Verify connectivity from nodes[1] fails because we did not pass its IP to the loadBalancerSourceRanges
|
||||
Eventually(func(g Gomega) error {
|
||||
cmd := "curl -s --max-time 5 " + node.InternalIP + ":82"
|
||||
_, err := aNode.RunCmdOnNode(cmd)
|
||||
return err
|
||||
}, "40s", "5s").Should(MatchError(ContainSubstring("exit status")))
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -348,12 +350,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
|
@ -25,11 +25,7 @@ func Test_E2ETailscale(t *testing.T) {
|
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@ -38,61 +34,59 @@ var _ = Describe("Verify Tailscale Configuration", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Server node needs to be ready before we continue
|
||||
It("Checks Node Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "300s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParseNodes(kubeConfigFile, true)
|
||||
_, err := e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Change agent's config", func() {
|
||||
nodeIPs, _ := e2e.GetNodeIPs(kubeConfigFile)
|
||||
nodeIPs, _ := e2e.GetNodeIPs(tc.KubeConfigFile)
|
||||
cmd := fmt.Sprintf("sudo sed -i 's/TAILSCALEIP/%s/g' /etc/rancher/k3s/config.yaml", nodeIPs[0].IPv4)
|
||||
for _, agent := range agentNodeNames {
|
||||
_, err := e2e.RunCmdOnNode(cmd, agent)
|
||||
for _, agent := range tc.Agents {
|
||||
_, err := agent.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
|
||||
It("Restart agents", func() {
|
||||
err := e2e.RestartCluster(agentNodeNames)
|
||||
err := e2e.RestartCluster(tc.Agents)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
})
|
||||
|
||||
It("Checks Node Status", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes)).To(Equal(*agentCount + *serverCount))
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "300s", "5s").Should(Succeed())
|
||||
_, err := e2e.ParseNodes(kubeConfigFile, true)
|
||||
_, err := e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Verifies that server and agent have a tailscale IP as nodeIP", func() {
|
||||
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
|
||||
nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodeIPs {
|
||||
Expect(node.IPv4).Should(ContainSubstring("100."))
|
||||
|
@ -102,8 +96,8 @@ var _ = Describe("Verify Tailscale Configuration", Ordered, func() {
|
|||
It("Verify routing is correct and uses tailscale0 interface for internode traffic", func() {
|
||||
// table 52 is the one configured by tailscale
|
||||
cmd := "ip route show table 52"
|
||||
for _, node := range append(serverNodeNames, agentNodeNames...) {
|
||||
output, err := e2e.RunCmdOnNode(cmd, node)
|
||||
for _, node := range append(tc.Servers, tc.Agents...) {
|
||||
output, err := node.RunCmdOnNode(cmd)
|
||||
fmt.Println(err)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(output).Should(ContainSubstring("10.42."))
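// For reference, routing table 52 on a node would look roughly like the lines below,
// with the remote pod CIDR reachable through tailscale0 instead of the flannel VXLAN
// path (illustrative output, not captured from this test run):
//   10.42.1.0/24 dev tailscale0
//   100.64.0.2 dev tailscale0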
@ -119,12 +113,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
|
@ -18,6 +18,39 @@ import (
|
|||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Defining the VagrantNode type allows methods like RunCmdOnNode to be defined on it.
// This makes test code more consistent, as similar functions can exist in Docker and E2E tests.
|
||||
type VagrantNode string
|
||||
|
||||
func (v VagrantNode) String() string {
|
||||
return string(v)
|
||||
}
|
||||
|
||||
func VagrantSlice(v []VagrantNode) []string {
|
||||
nodes := make([]string, 0, len(v))
|
||||
for _, node := range v {
|
||||
nodes = append(nodes, node.String())
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
type TestConfig struct {
|
||||
Hardened bool
|
||||
KubeConfigFile string
|
||||
Servers []VagrantNode
|
||||
Agents []VagrantNode
|
||||
}
|
||||
|
||||
func (tc *TestConfig) Status() string {
|
||||
sN := strings.Join(VagrantSlice(tc.Servers), " ")
|
||||
aN := strings.Join(VagrantSlice(tc.Agents), " ")
|
||||
hardened := ""
|
||||
if tc.Hardened {
|
||||
hardened = "Hardened: true\n"
|
||||
}
|
||||
return fmt.Sprintf("%sKubeconfig: %s\nServers Nodes: %s\nAgents Nodes: %s\n)", hardened, tc.KubeConfigFile, sN, aN)
|
||||
}
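Taken together, TestConfig and VagrantNode replace the loose kubeConfigFile/serverNodeNames/agentNodeNames variables that each suite used to carry. A condensed, illustrative setup using only APIs introduced or kept by this commit:

tc, err := e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
By(tc.Status())

// Per-node commands are now methods on VagrantNode rather than free functions.
out, err := tc.Servers[0].RunCmdOnNode("k3s token list")
Expect(err).NotTo(HaveOccurred(), out)

// Workload deployment reads the kubeconfig and hardened flag from the struct.
_, err = tc.DeployWorkload("loadbalancer.yaml")
Expect(err).NotTo(HaveOccurred())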
type Node struct {
|
||||
Name string
|
||||
Status string
|
||||
|
@ -30,18 +63,8 @@ func (n Node) String() string {
|
|||
return fmt.Sprintf("Node (name: %s, status: %s, roles: %s)", n.Name, n.Status, n.Roles)
|
||||
}
|
||||
|
||||
type Pod struct {
|
||||
NameSpace string
|
||||
Name string
|
||||
Ready string
|
||||
Status string
|
||||
Restarts string
|
||||
IP string
|
||||
Node string
|
||||
}
|
||||
|
||||
type NodeError struct {
|
||||
Node string
|
||||
Node VagrantNode
|
||||
Cmd string
|
||||
Err error
|
||||
}
|
||||
|
@ -65,7 +88,7 @@ func (ne *NodeError) Unwrap() error {
|
|||
return ne.Err
|
||||
}
|
||||
|
||||
func newNodeError(cmd, node string, err error) *NodeError {
|
||||
func newNodeError(cmd string, node VagrantNode, err error) *NodeError {
|
||||
return &NodeError{
|
||||
Cmd: cmd,
|
||||
Node: node,
|
||||
|
@ -73,28 +96,18 @@ func newNodeError(cmd, node string, err error) *NodeError {
|
|||
}
|
||||
}
|
||||
|
||||
func CountOfStringInSlice(str string, pods []Pod) int {
|
||||
count := 0
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, str) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// genNodeEnvs generates the node and testing environment variables for vagrant up
|
||||
func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string, string) {
|
||||
serverNodeNames := make([]string, serverCount)
|
||||
func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]VagrantNode, []VagrantNode, string) {
|
||||
serverNodes := make([]VagrantNode, serverCount)
|
||||
for i := 0; i < serverCount; i++ {
|
||||
serverNodeNames[i] = "server-" + strconv.Itoa(i)
|
||||
serverNodes[i] = VagrantNode("server-" + strconv.Itoa(i))
|
||||
}
|
||||
agentNodeNames := make([]string, agentCount)
|
||||
agentNodes := make([]VagrantNode, agentCount)
|
||||
for i := 0; i < agentCount; i++ {
|
||||
agentNodeNames[i] = "agent-" + strconv.Itoa(i)
|
||||
agentNodes[i] = VagrantNode("agent-" + strconv.Itoa(i))
|
||||
}
|
||||
|
||||
nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")
|
||||
nodeRoles := strings.Join(VagrantSlice(serverNodes), " ") + " " + strings.Join(VagrantSlice(agentNodes), " ")
|
||||
nodeRoles = strings.TrimSpace(nodeRoles)
|
||||
|
||||
nodeBoxes := strings.Repeat(nodeOS+" ", serverCount+agentCount)
|
||||
|
@ -102,12 +115,12 @@ func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string
|
|||
|
||||
nodeEnvs := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s"`, nodeRoles, nodeBoxes)
|
||||
|
||||
return serverNodeNames, agentNodeNames, nodeEnvs
|
||||
return serverNodes, agentNodes, nodeEnvs
|
||||
}
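As a concrete illustration (the box name is an assumption), two servers and one agent produce names and environment variables along these lines:

// Hypothetical invocation from within the e2e package:
servers, agents, envs := genNodeEnvs("generic/ubuntu2204", 2, 1)
// servers -> [server-0 server-1]
// agents  -> [agent-0]
// envs    -> E2E_NODE_ROLES="server-0 server-1 agent-0" E2E_NODE_BOXES="generic/ubuntu2204 generic/ubuntu2204 generic/ubuntu2204"
fmt.Println(servers, agents, envs)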
func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
|
||||
func CreateCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, error) {
|
||||
|
||||
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
|
||||
serverNodes, agentNodes, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
|
||||
|
||||
var testOptions string
|
||||
for _, env := range os.Environ() {
|
||||
|
@ -116,16 +129,16 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
|
|||
}
|
||||
}
|
||||
// Bring up the first server node
|
||||
cmd := fmt.Sprintf(`%s %s vagrant up %s &> vagrant.log`, nodeEnvs, testOptions, serverNodeNames[0])
|
||||
cmd := fmt.Sprintf(`%s %s vagrant up %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0])
|
||||
fmt.Println(cmd)
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
return nil, nil, newNodeError(cmd, serverNodeNames[0], err)
|
||||
return nil, newNodeError(cmd, serverNodes[0], err)
|
||||
}
|
||||
|
||||
// Bring up the rest of the nodes in parallel
|
||||
errg, _ := errgroup.WithContext(context.Background())
|
||||
for _, node := range append(serverNodeNames[1:], agentNodeNames...) {
|
||||
cmd := fmt.Sprintf(`%s %s vagrant up %s &>> vagrant.log`, nodeEnvs, testOptions, node)
|
||||
for _, node := range append(serverNodes[1:], agentNodes...) {
|
||||
cmd := fmt.Sprintf(`%s %s vagrant up %s &>> vagrant.log`, nodeEnvs, testOptions, node.String())
|
||||
fmt.Println(cmd)
|
||||
errg.Go(func() error {
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
|
@ -134,26 +147,47 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
|
|||
return nil
|
||||
})
|
||||
// We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster
|
||||
if strings.Contains(node, "agent") {
|
||||
if strings.Contains(node.String(), "agent") {
|
||||
time.Sleep(5 * time.Second)
|
||||
} else {
|
||||
time.Sleep(30 * time.Second)
|
||||
}
|
||||
}
|
||||
if err := errg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serverNodeNames, agentNodeNames, nil
|
||||
// For the startup test we don't start the cluster, so check that k3s is active
// before generating the kubeconfig file
|
||||
var kubeConfigFile string
|
||||
res, err := serverNodes[0].RunCmdOnNode("systemctl is-active k3s")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !strings.Contains(res, "inactive") && strings.Contains(res, "active") {
|
||||
kubeConfigFile, err = GenKubeConfigFile(serverNodes[0].String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tc := &TestConfig{
|
||||
KubeConfigFile: kubeConfigFile,
|
||||
Servers: serverNodes,
|
||||
Agents: agentNodes,
|
||||
}
|
||||
|
||||
return tc, nil
|
||||
}
|
||||
|
||||
func scpK3sBinary(nodeNames []string) error {
|
||||
func scpK3sBinary(nodeNames []VagrantNode) error {
|
||||
for _, node := range nodeNames {
|
||||
cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
|
||||
cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node.String())
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
|
||||
}
|
||||
if _, err := RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil {
|
||||
cmd = "vagrant ssh " + node.String() + " -c \"sudo mv /tmp/k3s /usr/local/bin/\""
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -162,10 +196,9 @@ func scpK3sBinary(nodeNames []string) error {
|
|||
|
||||
// CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for
|
||||
// this function to work. The binary is deployed as an airgapped install of k3s on the VMs.
|
||||
// This is intended only for local testing purposes when writing a new E2E test.
|
||||
func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
|
||||
func CreateLocalCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, error) {
|
||||
|
||||
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
|
||||
serverNodes, agentNodes, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
|
||||
|
||||
var testOptions string
|
||||
var cmd string
|
||||
|
@ -179,15 +212,15 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
|
|||
|
||||
// Provision the first server node. In GitHub Actions, this also imports the VM image into libvirt, which
|
||||
// takes time and can cause the next vagrant up to fail if it is not given enough time to complete.
|
||||
cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, serverNodeNames[0])
|
||||
cmd = fmt.Sprintf(`%s %s vagrant up --no-tty --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0])
|
||||
fmt.Println(cmd)
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
return nil, nil, newNodeError(cmd, serverNodeNames[0], err)
|
||||
return nil, newNodeError(cmd, serverNodes[0], err)
|
||||
}
|
||||
|
||||
// Bring up the rest of the nodes in parallel
|
||||
errg, _ := errgroup.WithContext(context.Background())
|
||||
for _, node := range append(serverNodeNames[1:], agentNodeNames...) {
|
||||
for _, node := range append(serverNodes[1:], agentNodes...) {
|
||||
cmd := fmt.Sprintf(`%s %s vagrant up --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
|
||||
errg.Go(func() error {
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
|
@ -199,15 +232,15 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
|
|||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
if err := errg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := scpK3sBinary(append(serverNodeNames, agentNodeNames...)); err != nil {
|
||||
return nil, nil, err
|
||||
if err := scpK3sBinary(append(serverNodes, agentNodes...)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Install K3s on all nodes in parallel
|
||||
errg, _ = errgroup.WithContext(context.Background())
|
||||
for _, node := range append(serverNodeNames, agentNodeNames...) {
|
||||
for _, node := range append(serverNodes, agentNodes...) {
|
||||
cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
|
||||
errg.Go(func() error {
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
|
@ -219,15 +252,34 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
|
|||
time.Sleep(20 * time.Second)
|
||||
}
|
||||
if err := errg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serverNodeNames, agentNodeNames, nil
|
||||
// For the startup test we don't start the cluster, so check first before generating the kubeconfig file.
|
||||
// Systemctl returns an exit code of 3 when the service is inactive, so we don't check for errors
|
||||
// on the command itself.
|
||||
var kubeConfigFile string
|
||||
var err error
|
||||
res, _ := serverNodes[0].RunCmdOnNode("systemctl is-active k3s")
|
||||
if !strings.Contains(res, "inactive") && strings.Contains(res, "active") {
|
||||
kubeConfigFile, err = GenKubeConfigFile(serverNodes[0].String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tc := &TestConfig{
|
||||
KubeConfigFile: kubeConfigFile,
|
||||
Servers: serverNodes,
|
||||
Agents: agentNodes,
|
||||
}
|
||||
|
||||
return tc, nil
|
||||
}
|
||||
|
||||
func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) {
|
||||
func (tc TestConfig) DeployWorkload(workload string) (string, error) {
|
||||
resourceDir := "../amd64_resource_files"
|
||||
if hardened {
|
||||
if tc.Hardened {
|
||||
resourceDir = "../cis_amd64_resource_files"
|
||||
}
|
||||
files, err := os.ReadDir(resourceDir)
|
||||
|
@ -239,7 +291,7 @@ func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error)
|
|||
for _, f := range files {
|
||||
filename := filepath.Join(resourceDir, f.Name())
|
||||
if strings.TrimSpace(f.Name()) == workload {
|
||||
cmd := "kubectl apply -f " + filename + " --kubeconfig=" + kubeconfig
|
||||
cmd := "kubectl apply -f " + filename + " --kubeconfig=" + tc.KubeConfigFile
|
||||
return RunCommand(cmd)
|
||||
}
|
||||
}
|
||||
|
@ -301,9 +353,9 @@ func FetchIngressIP(kubeconfig string) ([]string, error) {
|
|||
return ingressIPs, nil
|
||||
}
|
||||
|
||||
func FetchNodeExternalIP(nodename string) (string, error) {
|
||||
cmd := "vagrant ssh " + nodename + " -c \"ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1\""
|
||||
ipaddr, err := RunCommand(cmd)
|
||||
func (v VagrantNode) FetchNodeExternalIP() (string, error) {
|
||||
cmd := "ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1"
|
||||
ipaddr, err := v.RunCmdOnNode(cmd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -313,9 +365,10 @@ func FetchNodeExternalIP(nodename string) (string, error) {
|
|||
return nodeip, nil
|
||||
}
|
||||
|
||||
func GenKubeConfigFile(serverName string) (string, error) {
|
||||
kubeConfigFile := fmt.Sprintf("kubeconfig-%s", serverName)
|
||||
cmd := fmt.Sprintf("vagrant scp %s:/etc/rancher/k3s/k3s.yaml ./%s", serverName, kubeConfigFile)
|
||||
// GenKubeConfigFile extracts the kubeconfig from the given node and modifies it for use outside the VM.
|
||||
func GenKubeConfigFile(nodeName string) (string, error) {
|
||||
kubeConfigFile := fmt.Sprintf("kubeconfig-%s", nodeName)
|
||||
cmd := fmt.Sprintf("vagrant scp %s:/etc/rancher/k3s/k3s.yaml ./%s", nodeName, kubeConfigFile)
|
||||
_, err := RunCommand(cmd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -328,7 +381,8 @@ func GenKubeConfigFile(serverName string) (string, error) {
|
|||
|
||||
re := regexp.MustCompile(`(?m)==> vagrant:.*\n`)
|
||||
modifiedKubeConfig := re.ReplaceAllString(string(kubeConfig), "")
|
||||
nodeIP, err := FetchNodeExternalIP(serverName)
|
||||
vNode := VagrantNode(nodeName)
|
||||
nodeIP, err := vNode.FetchNodeExternalIP()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -359,16 +413,16 @@ func GenReport(specReport ginkgo.SpecReport) {
|
|||
fmt.Printf("%s", status)
|
||||
}
|
||||
|
||||
func GetJournalLogs(node string) (string, error) {
|
||||
func (v VagrantNode) GetJournalLogs() (string, error) {
|
||||
cmd := "journalctl -u k3s* --no-pager"
|
||||
return RunCmdOnNode(cmd, node)
|
||||
return v.RunCmdOnNode(cmd)
|
||||
}
|
||||
|
||||
func TailJournalLogs(lines int, nodes []string) string {
|
||||
func TailJournalLogs(lines int, nodes []VagrantNode) string {
|
||||
logs := &strings.Builder{}
|
||||
for _, node := range nodes {
|
||||
cmd := fmt.Sprintf("journalctl -u k3s* --no-pager --lines=%d", lines)
|
||||
if l, err := RunCmdOnNode(cmd, node); err != nil {
|
||||
if l, err := node.RunCmdOnNode(cmd); err != nil {
|
||||
fmt.Fprintf(logs, "** failed to read journald log for node %s ***\n%v\n", node, err)
|
||||
} else {
|
||||
fmt.Fprintf(logs, "** journald log for node %s ***\n%s\n", node, l)
|
||||
|
@ -379,14 +433,14 @@ func TailJournalLogs(lines int, nodes []string) string {
|
|||
|
||||
// SaveJournalLogs saves the journal logs of each node to a <NAME>-jlog.txt file.
|
||||
// When used in GHA CI, the logs are uploaded as an artifact on failure.
|
||||
func SaveJournalLogs(nodeNames []string) error {
|
||||
for _, node := range nodeNames {
|
||||
lf, err := os.Create(node + "-jlog.txt")
|
||||
func SaveJournalLogs(nodes []VagrantNode) error {
|
||||
for _, node := range nodes {
|
||||
lf, err := os.Create(node.String() + "-jlog.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lf.Close()
|
||||
logs, err := GetJournalLogs(node)
|
||||
logs, err := node.GetJournalLogs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -397,11 +451,11 @@ func SaveJournalLogs(nodeNames []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func GetConfig(nodes []string) string {
|
||||
func GetConfig(nodes []VagrantNode) string {
|
||||
config := &strings.Builder{}
|
||||
for _, node := range nodes {
|
||||
cmd := "tar -Pc /etc/rancher/k3s/ | tar -vxPO"
|
||||
if c, err := RunCmdOnNode(cmd, node); err != nil {
|
||||
if c, err := node.RunCmdOnNode(cmd); err != nil {
|
||||
fmt.Fprintf(config, "** failed to get config for node %s ***\n%v\n", node, err)
|
||||
} else {
|
||||
fmt.Fprintf(config, "** config for node %s ***\n%s\n", node, c)
|
||||
|
@ -416,7 +470,7 @@ func GetVagrantLog(cErr error) string {
|
|||
var nodeErr *NodeError
|
||||
nodeJournal := ""
|
||||
if errors.As(cErr, &nodeErr) {
|
||||
nodeJournal, _ = GetJournalLogs(nodeErr.Node)
|
||||
nodeJournal, _ = nodeErr.Node.GetJournalLogs()
|
||||
nodeJournal = "\nNode Journal Logs:\n" + nodeJournal
|
||||
}
|
||||
|
||||
|
@ -464,51 +518,17 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
|
|||
return nodes, nil
|
||||
}
|
||||
|
||||
func formatPods(input string) ([]Pod, error) {
|
||||
pods := make([]Pod, 0, 10)
|
||||
input = strings.TrimSpace(input)
|
||||
split := strings.Split(input, "\n")
|
||||
for _, rec := range split {
|
||||
fields := strings.Fields(string(rec))
|
||||
if len(fields) < 8 {
|
||||
return nil, fmt.Errorf("invalid pod record: %s", rec)
|
||||
}
|
||||
pod := Pod{
|
||||
NameSpace: fields[0],
|
||||
Name: fields[1],
|
||||
Ready: fields[2],
|
||||
Status: fields[3],
|
||||
Restarts: fields[4],
|
||||
IP: fields[6],
|
||||
Node: fields[7],
|
||||
}
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
|
||||
podList := ""
|
||||
|
||||
func DumpPods(kubeConfig string) {
|
||||
cmd := "kubectl get pods -o wide --no-headers -A"
|
||||
res, _ := RunCommand(cmd)
|
||||
podList = strings.TrimSpace(res)
|
||||
|
||||
pods, err := formatPods(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if print {
|
||||
fmt.Println(podList)
|
||||
}
|
||||
return pods, nil
|
||||
fmt.Println(strings.TrimSpace(res))
|
||||
}
|
||||
|
||||
// RestartCluster restarts the k3s service on each node given
|
||||
func RestartCluster(nodeNames []string) error {
|
||||
for _, nodeName := range nodeNames {
|
||||
func RestartCluster(nodes []VagrantNode) error {
|
||||
for _, node := range nodes {
|
||||
cmd := "systemctl restart k3s* --all"
|
||||
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
|
||||
if _, err := node.RunCmdOnNode(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -516,13 +536,13 @@ func RestartCluster(nodeNames []string) error {
|
|||
}
|
||||
|
||||
// StartCluster starts the k3s service on each node given
|
||||
func StartCluster(nodeNames []string) error {
|
||||
for _, nodeName := range nodeNames {
|
||||
func StartCluster(nodes []VagrantNode) error {
|
||||
for _, node := range nodes {
|
||||
cmd := "systemctl start k3s"
|
||||
if strings.Contains(nodeName, "agent") {
|
||||
if strings.Contains(node.String(), "agent") {
|
||||
cmd += "-agent"
|
||||
}
|
||||
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
|
||||
if _, err := node.RunCmdOnNode(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -530,10 +550,10 @@ func StartCluster(nodeNames []string) error {
|
|||
}
|
||||
|
||||
// StopCluster stops the k3s service on each node given
|
||||
func StopCluster(nodeNames []string) error {
|
||||
for _, nodeName := range nodeNames {
|
||||
func StopCluster(nodes []VagrantNode) error {
|
||||
for _, node := range nodes {
|
||||
cmd := "systemctl stop k3s*"
|
||||
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
|
||||
if _, err := node.RunCmdOnNode(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -541,18 +561,18 @@ func StopCluster(nodeNames []string) error {
|
|||
}
|
||||
|
||||
// RunCmdOnNode executes a command from within the given node as sudo
|
||||
func RunCmdOnNode(cmd string, nodename string) (string, error) {
|
||||
func (v VagrantNode) RunCmdOnNode(cmd string) (string, error) {
|
||||
injectEnv := ""
|
||||
if _, ok := os.LookupEnv("E2E_GOCOVER"); ok && strings.HasPrefix(cmd, "k3s") {
|
||||
injectEnv = "GOCOVERDIR=/tmp/k3scov "
|
||||
}
|
||||
runcmd := "vagrant ssh " + nodename + " -c \"sudo " + injectEnv + cmd + "\""
|
||||
runcmd := "vagrant ssh --no-tty " + v.String() + " -c \"sudo " + injectEnv + cmd + "\""
|
||||
out, err := RunCommand(runcmd)
|
||||
// On GHA CI we see warnings about "[fog][WARNING] Unrecognized arguments: libvirt_ip_command"
|
||||
// these are added to the command output and need to be removed
|
||||
out = strings.ReplaceAll(out, "[fog][WARNING] Unrecognized arguments: libvirt_ip_command\n", "")
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err)
|
||||
return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, v.String(), out, err)
|
||||
}
|
||||
return out, nil
|
||||
}
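When coverage collection is enabled, the only difference is the injected GOCOVERDIR prefix; an illustrative call from a test suite (the token subcommand is just an example):

// With E2E_GOCOVER set in the test environment, k3s commands run on the node are
// prefixed with GOCOVERDIR=/tmp/k3scov so the instrumented binary writes coverage
// data that GetCoverageReport later copies off the VM.
node := e2e.VagrantNode("server-0")
out, err := node.RunCmdOnNode("k3s token list")
Expect(err).NotTo(HaveOccurred(), out)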
@ -569,16 +589,16 @@ func RunCommand(cmd string) (string, error) {
|
|||
return string(out), err
|
||||
}
|
||||
|
||||
func UpgradeCluster(nodeNames []string, local bool) error {
|
||||
func UpgradeCluster(nodes []VagrantNode, local bool) error {
|
||||
upgradeVersion := "E2E_RELEASE_CHANNEL=commit"
|
||||
if local {
|
||||
if err := scpK3sBinary(nodeNames); err != nil {
|
||||
if err := scpK3sBinary(nodes); err != nil {
|
||||
return err
|
||||
}
|
||||
upgradeVersion = "E2E_RELEASE_VERSION=skip"
|
||||
}
|
||||
for _, nodeName := range nodeNames {
|
||||
cmd := upgradeVersion + " vagrant provision " + nodeName
|
||||
for _, node := range nodes {
|
||||
cmd := upgradeVersion + " vagrant provision " + node.String()
|
||||
if out, err := RunCommand(cmd); err != nil {
|
||||
fmt.Println("Error Upgrading Cluster", out)
|
||||
return err
|
||||
|
@ -587,16 +607,16 @@ func UpgradeCluster(nodeNames []string, local bool) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func GetCoverageReport(nodeNames []string) error {
|
||||
func GetCoverageReport(nodes []VagrantNode) error {
|
||||
if os.Getenv("E2E_GOCOVER") == "" {
|
||||
return nil
|
||||
}
|
||||
covDirs := []string{}
|
||||
for _, nodeName := range nodeNames {
|
||||
covDir := nodeName + "-cov"
|
||||
for _, node := range nodes {
|
||||
covDir := node.String() + "-cov"
|
||||
covDirs = append(covDirs, covDir)
|
||||
os.MkdirAll(covDir, 0755)
|
||||
cmd := "vagrant scp " + nodeName + ":/tmp/k3scov/* " + covDir
|
||||
cmd := "vagrant scp " + node.String() + ":/tmp/k3scov/* " + covDir
|
||||
if _, err := RunCommand(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -627,19 +647,29 @@ func GetCoverageReport(nodeNames []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// getPodIPs returns the IPs of all pods
|
||||
// GetDaemonsetReady returns the number of ready pods for the given daemonset
|
||||
func GetDaemonsetReady(daemonset string, kubeConfigFile string) (int, error) {
|
||||
cmd := "kubectl get ds " + daemonset + " -o jsonpath='{range .items[*]}{.status.numberReady}' --kubeconfig=" + kubeConfigFile
|
||||
out, err := RunCommand(cmd)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.Atoi(out)
|
||||
}
|
||||
|
||||
// GetPodIPs returns the IPs of all pods
|
||||
func GetPodIPs(kubeConfigFile string) ([]ObjIP, error) {
|
||||
cmd := `kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile
|
||||
return GetObjIPs(cmd)
|
||||
}
|
||||
|
||||
// getNodeIPs returns the IPs of all nodes
|
||||
// GetNodeIPs returns the IPs of all nodes
|
||||
func GetNodeIPs(kubeConfigFile string) ([]ObjIP, error) {
|
||||
cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}' --kubeconfig=` + kubeConfigFile
|
||||
return GetObjIPs(cmd)
|
||||
}
|
||||
|
||||
// getObjIPs executes a command to collect IPs
|
||||
// GetObjIPs executes a command to collect IPs
|
||||
func GetObjIPs(cmd string) ([]ObjIP, error) {
|
||||
var objIPs []ObjIP
|
||||
res, err := RunCommand(cmd)
|
||||
|
|
|
@ -4,10 +4,10 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -33,11 +33,7 @@ func Test_E2EToken(t *testing.T) {
|
|||
RunSpecs(t, "SnapshotRestore Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@ -46,66 +42,54 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func()
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
|
||||
})
|
||||
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
var permToken string
|
||||
It("Creates a permanent agent token", func() {
|
||||
permToken = "perage.s0xt4u0hl5guoyi6"
|
||||
_, err := e2e.RunCmdOnNode("k3s token create --ttl=0 "+permToken, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("k3s token create --ttl=0 " + permToken)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s token list")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(MatchRegexp(`perage\s+<forever>\s+<never>`))
|
||||
})
|
||||
It("Joins an agent with the permanent token", func() {
|
||||
cmd := fmt.Sprintf("echo 'token: %s' | sudo tee -a /etc/rancher/k3s/config.yaml > /dev/null", permToken)
|
||||
_, err := e2e.RunCmdOnNode(cmd, agentNodeNames[0])
|
||||
_, err := tc.Agents[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = e2e.RunCmdOnNode("systemctl start k3s-agent", agentNodeNames[0])
|
||||
_, err = tc.Agents[0].RunCmdOnNode("systemctl start k3s-agent")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 1))
|
||||
g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 1))
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
|
@ -114,38 +98,38 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func()
|
|||
})
|
||||
Context("Agent joins with temporary token:", func() {
|
||||
It("Creates a 20s agent token", func() {
|
||||
_, err := e2e.RunCmdOnNode("k3s token create --ttl=20s 20sect.jxnpve6vg8dqm895", serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("k3s token create --ttl=20s 20sect.jxnpve6vg8dqm895")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s token list")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(MatchRegexp(`20sect\s+[0-9]{2}s`))
|
||||
})
|
||||
It("Cleans up 20s token automatically", func() {
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
|
||||
return tc.Servers[0].RunCmdOnNode("k3s token list")
|
||||
}, "25s", "5s").ShouldNot(ContainSubstring("20sect"))
|
||||
})
|
||||
var tempToken string
|
||||
It("Creates a 10m agent token", func() {
|
||||
tempToken = "10mint.ida18trbbk43szwk"
|
||||
_, err := e2e.RunCmdOnNode("k3s token create --ttl=10m "+tempToken, serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("k3s token create --ttl=10m " + tempToken)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
time.Sleep(2 * time.Second)
|
||||
res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0])
|
||||
res, err := tc.Servers[0].RunCmdOnNode("k3s token list")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).To(MatchRegexp(`10mint\s+[0-9]m`))
|
||||
})
|
||||
It("Joins an agent with the 10m token", func() {
|
||||
cmd := fmt.Sprintf("echo 'token: %s' | sudo tee -a /etc/rancher/k3s/config.yaml > /dev/null", tempToken)
|
||||
_, err := e2e.RunCmdOnNode(cmd, agentNodeNames[1])
|
||||
_, err := tc.Agents[1].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = e2e.RunCmdOnNode("systemctl start k3s-agent", agentNodeNames[1])
|
||||
_, err = tc.Agents[1].RunCmdOnNode("systemctl start k3s-agent")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 2))
|
||||
g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 2))
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
|
@ -155,23 +139,23 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func()
|
|||
Context("Rotate server bootstrap token", func() {
|
||||
serverToken := "1234"
|
||||
It("Creates a new server token", func() {
|
||||
Expect(e2e.RunCmdOnNode("k3s token rotate -t vagrant --new-token="+serverToken, serverNodeNames[0])).
|
||||
Expect(tc.Servers[0].RunCmdOnNode("k3s token rotate -t vagrant --new-token=" + serverToken)).
|
||||
To(ContainSubstring("Token rotated, restart k3s nodes with new token"))
|
||||
})
|
||||
It("Restarts servers with the new token", func() {
|
||||
cmd := fmt.Sprintf("sed -i 's/token:.*/token: %s/' /etc/rancher/k3s/config.yaml", serverToken)
|
||||
for _, node := range serverNodeNames {
|
||||
_, err := e2e.RunCmdOnNode(cmd, node)
|
||||
for _, node := range tc.Servers {
|
||||
_, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
for _, node := range serverNodeNames {
|
||||
_, err := e2e.RunCmdOnNode("systemctl restart k3s", node)
|
||||
for _, node := range tc.Servers {
|
||||
_, err := node.RunCmdOnNode("systemctl restart k3s")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 2))
|
||||
g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 2))
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
|
@ -179,15 +163,15 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func()
|
|||
})
|
||||
It("Rejoins an agent with the new server token", func() {
|
||||
cmd := fmt.Sprintf("sed -i 's/token:.*/token: %s/' /etc/rancher/k3s/config.yaml", serverToken)
|
||||
_, err := e2e.RunCmdOnNode(cmd, agentNodeNames[0])
|
||||
_, err := tc.Agents[0].RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = e2e.RunCmdOnNode("systemctl restart k3s-agent", agentNodeNames[0])
|
||||
_, err = tc.Agents[0].RunCmdOnNode("systemctl restart k3s-agent")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 2))
|
||||
g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 2))
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
|
@ -203,12 +187,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
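The recurring change in the hunks above is that RunCmdOnNode is no longer a package function taking a node name, but a method on the node values held by the shared test config, so calls read tc.Servers[0].RunCmdOnNode("k3s token list"). A minimal sketch of that shape, with the node type and the SSH transport assumed rather than taken from the real e2e package:

```go
package e2e

import (
	"fmt"
	"os/exec"
)

// VagrantNode is an illustrative handle for one cluster member; the real type
// lives in tests/e2e and is what tc.Servers and tc.Agents hold.
type VagrantNode string

func (v VagrantNode) String() string { return string(v) }

// RunCmdOnNode runs a shell command on the node, mirroring the method-style
// call sites in this hunk. The `vagrant ssh` transport shown here is an
// assumption, not the actual implementation.
func (v VagrantNode) RunCmdOnNode(cmd string) (string, error) {
	out, err := exec.Command("vagrant", "ssh", string(v), "-c", cmd).CombinedOutput()
	if err != nil {
		return string(out), fmt.Errorf("failed to run %q on node %s: %w", cmd, v, err)
	}
	return string(out), nil
}
```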
|
@ -4,9 +4,9 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -36,11 +36,7 @@ func Test_E2EUpgradeValidation(t *testing.T) {
|
|||
RunSpecs(t, "Upgrade Cluster Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@ -48,72 +44,61 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
Context("Cluster :", func() {
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
tc.Hardened = *hardened
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Verifies ClusterIP Service", func() {
|
||||
_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("clusterip.yaml")
|
||||
|
||||
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
|
||||
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
|
||||
|
||||
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
|
||||
clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false)
|
||||
cmd = "curl -L --insecure http://" + clusterip + "/name.html"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, nodeName)
|
||||
return node.RunCmdOnNode(cmd)
|
||||
}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
|
||||
}
|
||||
})
|
||||
|
||||
It("Verifies NodePort Service", func() {
|
||||
_, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("nodeport.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
for _, node := range tc.Servers {
|
||||
nodeExternalIP, _ := node.FetchNodeExternalIP()
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
nodeport, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
|
||||
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
|
||||
|
@ -127,15 +112,15 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies LoadBalancer Service", func() {
|
||||
_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("loadbalancer.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
|
||||
for _, nodeName := range serverNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
for _, node := range tc.Servers {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
port, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
|
||||
|
@ -148,11 +133,11 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Ingress", func() {
|
||||
_, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("ingress.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
|
@ -161,42 +146,38 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Daemonset", func() {
|
||||
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("daemonset.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
|
||||
|
||||
nodes, _ := e2e.ParseNodes(kubeConfigFile, false) //nodes :=
|
||||
nodes, _ := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, _ := e2e.ParsePods(kubeConfigFile, false)
|
||||
count := e2e.CountOfStringInSlice("test-daemonset", pods)
|
||||
fmt.Println("POD COUNT")
|
||||
fmt.Println(count)
|
||||
fmt.Println("NODE COUNT")
|
||||
fmt.Println(len(nodes))
|
||||
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
|
||||
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
|
||||
}, "240s", "10s").Should(Succeed())
|
||||
})
|
||||
|
||||
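The daemonset check above no longer counts pods whose names contain "test-daemonset"; it asks for the daemonset's own ready count through e2e.GetDaemonsetReady and compares that to the node count. A sketch of how such a helper could read the status with client-go (the all-namespaces lookup and error handling are assumptions):

```go
package e2e

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// GetDaemonsetReady reports how many pods of the named daemonset are ready,
// using the daemonset's own status rather than matching pod names.
func GetDaemonsetReady(name, kubeconfigFile string) (int, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
	if err != nil {
		return 0, err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return 0, err
	}
	daemonsets, err := client.AppsV1().DaemonSets("").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	for _, ds := range daemonsets.Items {
		if ds.Name == name {
			return int(ds.Status.NumberReady), nil
		}
	}
	return 0, fmt.Errorf("daemonset %s not found", name)
}
```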
It("Verifies dns access", func() {
|
||||
_, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("dnsutils.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "420s", "2s").Should(ContainSubstring("dnsutils"))
|
||||
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
|
||||
})
|
||||
|
||||
It("Verifies Local Path Provisioner storage ", func() {
|
||||
_, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("local-path-provisioner.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
fmt.Println(res)
|
||||
|
@ -205,7 +186,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
}, "240s", "2s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
fmt.Println(res)
|
||||
|
@ -214,25 +195,25 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
g.Expect(res).Should(ContainSubstring("Running"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
|
||||
cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
fmt.Println("Data stored in pvc: local-path-test")
|
||||
|
||||
cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
|
||||
cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err = e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
|
||||
_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
|
||||
_, err = tc.DeployWorkload("local-path-provisioner.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "420s", "2s").Should(ContainSubstring("local-path-provisioner"))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
fmt.Println(res)
|
||||
|
@ -242,72 +223,62 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
|
||||
// Check data after re-creation
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
|
||||
cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeConfigFile + " -- cat /data/test"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "180s", "2s").Should(ContainSubstring("local-path-test"), "Failed to retrieve data from pvc")
|
||||
})
|
||||
|
||||
It("Upgrades with no issues", func() {
|
||||
var err error
|
||||
Expect(e2e.UpgradeCluster(append(serverNodeNames, agentNodeNames...), *local)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.UpgradeCluster(append(tc.Servers, tc.Agents...), *local)).To(Succeed())
|
||||
Expect(e2e.RestartCluster(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
fmt.Println("CLUSTER UPGRADED")
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("After upgrade Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
e2e.ParseNodes(kubeConfigFile, true)
|
||||
e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"))
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"))
|
||||
}
|
||||
}
|
||||
}, "420s", "5s").Should(Succeed())
|
||||
e2e.ParsePods(kubeConfigFile, true)
|
||||
By("Fetching Pod status")
|
||||
tests.AllPodsUp(tc.KubeConfigFile)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
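Where the suites used to call e2e.ParsePods(kubeConfigFile, true) purely for its printed output, they now call e2e.DumpPods(tc.KubeConfigFile). A sketch of what a print-only helper like that could look like, with the output format assumed:

```go
package e2e

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// DumpPods prints a brief pod listing for debugging, replacing the old
// ParsePods(kubeconfig, true) print path.
func DumpPods(kubeconfigFile string) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
	if err != nil {
		fmt.Println("failed to load kubeconfig:", err)
		return
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		fmt.Println("failed to build client:", err)
		return
	}
	pods, err := client.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		fmt.Println("failed to list pods:", err)
		return
	}
	for _, pod := range pods.Items {
		fmt.Printf("%-16s %-48s %s\n", pod.Namespace, pod.Name, pod.Status.Phase)
	}
}
```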
It("After upgrade verifies ClusterIP Service", func() {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "420s", "5s").Should(ContainSubstring("test-clusterip"))
|
||||
|
||||
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
|
||||
clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false)
|
||||
cmd := "curl -L --insecure http://" + clusterip + "/name.html"
|
||||
fmt.Println(cmd)
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func() (string, error) {
|
||||
return e2e.RunCmdOnNode(cmd, nodeName)
|
||||
return node.RunCmdOnNode(cmd)
|
||||
}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
|
||||
}
|
||||
})
|
||||
|
||||
It("After upgrade verifies NodePort Service", func() {
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
for _, node := range tc.Servers {
|
||||
nodeExternalIP, _ := node.FetchNodeExternalIP()
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
nodeport, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
|
||||
|
||||
|
@ -320,9 +291,9 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
})
|
||||
|
||||
It("After upgrade verifies LoadBalancer Service", func() {
|
||||
for _, nodeName := range serverNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
for _, node := range tc.Servers {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
port, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Eventually(func() (string, error) {
|
||||
|
@ -331,15 +302,15 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
|
||||
}
|
||||
})
|
||||
|
||||
It("After upgrade verifies Ingress", func() {
|
||||
for _, nodeName := range serverNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
|
||||
fmt.Println(cmd)
|
||||
|
||||
|
@ -350,28 +321,24 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
|
|||
})
|
||||
|
||||
It("After upgrade verifies Daemonset", func() {
|
||||
nodes, _ := e2e.ParseNodes(kubeConfigFile, false) //nodes :=
|
||||
nodes, _ := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, _ := e2e.ParsePods(kubeConfigFile, false)
|
||||
count := e2e.CountOfStringInSlice("test-daemonset", pods)
|
||||
fmt.Println("POD COUNT")
|
||||
fmt.Println(count)
|
||||
fmt.Println("NODE COUNT")
|
||||
fmt.Println(len(nodes))
|
||||
g.Expect(len(nodes)).Should(Equal(count), "Daemonset pod count does not match node count")
|
||||
}, "420s", "1s").Should(Succeed())
|
||||
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
|
||||
}, "240s", "10s").Should(Succeed())
|
||||
})
|
||||
It("After upgrade verifies dns access", func() {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "180s", "2s").Should((ContainSubstring("kubernetes.default.svc.cluster.local")))
|
||||
})
|
||||
|
||||
It("After upgrade verify Local Path Provisioner storage ", func() {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
|
||||
cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeConfigFile + " -- cat /data/test"
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "180s", "2s").Should(ContainSubstring("local-path-test"))
|
||||
})
|
||||
|
@ -385,12 +352,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
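The per-pod status loops above (helm-install pods expected to be Completed, everything else Running) are replaced by a single polled helper, tests.AllPodsUp, wrapped in Eventually. One way such a helper might be written with client-go, assuming it walks all namespaces; this is a sketch, not the consolidated package's actual code:

```go
package tests

import (
	"context"
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// AllPodsUp returns nil once every pod is Running, treating helm-install jobs
// as up when they have Succeeded, so it can be polled with Eventually.
func AllPodsUp(kubeconfigFile string) error {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	pods, err := client.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		// helm-install pods are expected to complete rather than keep running.
		if strings.Contains(pod.Name, "helm-install") && pod.Status.Phase == corev1.PodSucceeded {
			continue
		}
		if pod.Status.Phase != corev1.PodRunning {
			return fmt.Errorf("pod %s/%s is %s, not Running", pod.Namespace, pod.Name, pod.Status.Phase)
		}
	}
	return nil
}
```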
|
@ -8,6 +8,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -35,11 +36,7 @@ func Test_E2EClusterValidation(t *testing.T) {
|
|||
RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
|
@ -48,61 +45,51 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
tc.Hardened = *hardened
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
})
|
||||
|
||||
It("Checks Node and Pod Status", func() {
|
||||
It("Checks node and pod status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
_, _ = e2e.ParseNodes(tc.KubeConfigFile, true)
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
e2e.DumpPods(tc.KubeConfigFile)
|
||||
})
|
||||
|
||||
It("Verifies ClusterIP Service", func() {
|
||||
res, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
|
||||
res, err := tc.DeployWorkload("clusterip.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res)
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: %q result: %s", cmd, res)
|
||||
}, "240s", "5s").Should(Succeed())
|
||||
|
||||
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
|
||||
clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false)
|
||||
cmd := "curl -L --insecure http://" + clusterip + "/name.html"
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
Eventually(func(g Gomega) {
|
||||
res, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
res, err := node.RunCmdOnNode(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
Expect(res).Should(ContainSubstring("test-clusterip"))
|
||||
}, "120s", "10s").Should(Succeed())
|
||||
|
@ -110,17 +97,17 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies NodePort Service", func() {
|
||||
_, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("nodeport.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
for _, node := range tc.Servers {
|
||||
nodeExternalIP, _ := node.FetchNodeExternalIP()
|
||||
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
|
||||
nodeport, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
|
||||
|
@ -137,18 +124,18 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies LoadBalancer Service", func() {
|
||||
_, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("loadbalancer.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
|
||||
port, err := e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
|
||||
|
@ -164,11 +151,11 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Ingress", func() {
|
||||
_, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("ingress.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
ip, _ := e2e.FetchNodeExternalIP(nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
ip, _ := node.FetchNodeExternalIP()
|
||||
cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
|
||||
fmt.Println(cmd)
|
||||
|
||||
|
@ -181,35 +168,31 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Daemonset", func() {
|
||||
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("daemonset.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
|
||||
|
||||
nodes, _ := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, _ := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, _ := e2e.ParsePods(kubeConfigFile, false)
|
||||
count := e2e.CountOfStringInSlice("test-daemonset", pods)
|
||||
fmt.Println("POD COUNT")
|
||||
fmt.Println(count)
|
||||
fmt.Println("NODE COUNT")
|
||||
fmt.Println(len(nodes))
|
||||
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
|
||||
}, "420s", "10s").Should(Succeed())
|
||||
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
|
||||
}, "240s", "10s").Should(Succeed())
|
||||
})
|
||||
|
||||
It("Verifies dns access", func() {
|
||||
_, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, *hardened)
|
||||
_, err := tc.DeployWorkload("dnsutils.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
g.Expect(res).Should(ContainSubstring("dnsutils"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
|
||||
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
|
@ -218,11 +201,11 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
})
|
||||
|
||||
It("Verifies Local Path Provisioner storage ", func() {
|
||||
res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
|
||||
res, err := tc.DeployWorkload("local-path-provisioner.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
g.Expect(res).Should(ContainSubstring("local-path-pvc"))
|
||||
|
@ -230,32 +213,32 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
}, "420s", "2s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
g.Expect(res).Should(ContainSubstring("volume-test"))
|
||||
g.Expect(res).Should(ContainSubstring("Running"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
|
||||
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
|
||||
cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
|
||||
res, err = e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
|
||||
cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
|
||||
cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err = e2e.RunCommand(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
|
||||
_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
|
||||
_, err = tc.DeployWorkload("local-path-provisioner.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeConfigFile
|
||||
res, _ := e2e.RunCommand(cmd)
|
||||
g.Expect(res).Should(ContainSubstring("local-path-provisioner"))
|
||||
}, "420s", "2s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile
|
||||
res, err := e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
|
||||
|
@ -264,7 +247,7 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
}, "420s", "2s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
|
||||
cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeConfigFile + " -- cat /data/test"
|
||||
res, err = e2e.RunCommand(cmd)
|
||||
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
|
||||
fmt.Println("Data after re-creation", res)
|
||||
|
@ -275,67 +258,52 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
|
||||
Context("Validate restart", func() {
|
||||
It("Restarts normally", func() {
|
||||
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
|
||||
errRestart := e2e.RestartCluster(append(tc.Servers, tc.Agents...))
|
||||
Expect(errRestart).NotTo(HaveOccurred(), "Nodes were not restarted correctly")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
pods, _ := e2e.ParsePods(kubeConfigFile, false)
|
||||
count := e2e.CountOfStringInSlice("test-daemonset", pods)
|
||||
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
|
||||
podsRunningAr := 0
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
|
||||
podsRunningAr++
|
||||
}
|
||||
}
|
||||
g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart")
|
||||
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pods that are ready does not match node count")
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Valdiate Certificate Rotation", func() {
|
||||
It("Stops K3s and rotates certificates", func() {
|
||||
errStop := e2e.StopCluster(serverNodeNames)
|
||||
errStop := e2e.StopCluster(tc.Servers)
|
||||
Expect(errStop).NotTo(HaveOccurred(), "Cluster could not be stopped successfully")
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
cmd := "k3s certificate rotate"
|
||||
_, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully on "+nodeName)
|
||||
_, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully on "+node.String())
|
||||
}
|
||||
})
|
||||
|
||||
It("Start normally", func() {
|
||||
// Since we stopped all the servers, we have to start 2 at once to get the cluster back up
|
||||
// If we only start one at a time, the first will hang waiting for the second to be up
|
||||
_, err := e2e.RunCmdOnNode("systemctl --no-block start k3s", serverNodeNames[0])
|
||||
_, err := tc.Servers[0].RunCmdOnNode("systemctl --no-block start k3s")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = e2e.StartCluster(serverNodeNames[1:])
|
||||
err = e2e.StartCluster(tc.Servers[1:])
|
||||
Expect(err).NotTo(HaveOccurred(), "Cluster could not be started successfully")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
for _, nodeName := range serverNodeNames {
|
||||
for _, node := range tc.Servers {
|
||||
cmd := "test ! -e /var/lib/rancher/k3s/server/tls/dynamic-cert-regenerate"
|
||||
_, err := e2e.RunCmdOnNode(cmd, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred(), "Dynamic cert regenerate file not removed on "+nodeName)
|
||||
_, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred(), "Dynamic cert regenerate file not removed on "+node.String())
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
}
|
||||
}
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
})
|
||||
|
||||
|
@ -354,21 +322,21 @@ var _ = Describe("Verify Create", Ordered, func() {
|
|||
"",
|
||||
}
|
||||
|
||||
for _, nodeName := range serverNodeNames {
|
||||
grCert, errGrep := e2e.RunCmdOnNode(grepCert, nodeName)
|
||||
Expect(errGrep).NotTo(HaveOccurred(), "TLS dirs could not be listed on "+nodeName)
|
||||
for _, node := range tc.Servers {
|
||||
grCert, errGrep := node.RunCmdOnNode(grepCert)
|
||||
Expect(errGrep).NotTo(HaveOccurred(), "TLS dirs could not be listed on "+node.String())
|
||||
re := regexp.MustCompile("tls-[0-9]+")
|
||||
tls := re.FindAllString(grCert, -1)[0]
|
||||
diff := fmt.Sprintf("diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+
|
||||
"| grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls)
|
||||
result, err := e2e.RunCmdOnNode(diff, nodeName)
|
||||
Expect(err).NotTo(HaveOccurred(), "Certificate diff not created successfully on "+nodeName)
|
||||
result, err := node.RunCmdOnNode(diff)
|
||||
Expect(err).NotTo(HaveOccurred(), "Certificate diff not created successfully on "+node.String())
|
||||
|
||||
certArray := strings.Split(result, "\n")
|
||||
Expect((certArray)).Should((Equal(expectResult)), "Certificate diff does not match the expected results on "+nodeName)
|
||||
Expect((certArray)).Should((Equal(expectResult)), "Certificate diff does not match the expected results on "+node.String())
|
||||
}
|
||||
|
||||
errRestartAgent := e2e.RestartCluster(agentNodeNames)
|
||||
errRestartAgent := e2e.RestartCluster(tc.Agents)
|
||||
Expect(errRestartAgent).NotTo(HaveOccurred(), "Agents could not be restarted successfully")
|
||||
})
|
||||
|
||||
|
@ -382,12 +350,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
|
||||
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...)))
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
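Alongside the node methods, these suites now carry their shared state in a single tc value: kubeconfig path, hardened flag, and the server and agent node lists, with DeployWorkload and Status as methods on it. A sketch of that struct under those assumptions (field names follow the call sites in the hunks; the manifest directory and kubectl invocation are illustrative, not the real helper):

```go
package e2e

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

// VagrantNode is the node handle sketched earlier; restated so this example
// stands alone.
type VagrantNode string

// TestConfig is a sketch of the struct the suites share as tc; the exact
// layout of the real type is an assumption.
type TestConfig struct {
	KubeConfigFile string
	Hardened       bool
	Servers        []VagrantNode
	Agents         []VagrantNode
}

// Status renders the one-line cluster summary printed with By(tc.Status()).
func (tc *TestConfig) Status() string {
	return fmt.Sprintf("Servers: %v Agents: %v Hardened: %t", tc.Servers, tc.Agents, tc.Hardened)
}

// DeployWorkload shows how the old DeployWorkload(file, kubeconfig, hardened)
// arguments can be folded into the method receiver.
func (tc *TestConfig) DeployWorkload(workload string) (string, error) {
	dir := "../resource_files"
	if tc.Hardened {
		dir = filepath.Join(dir, "hardened")
	}
	out, err := exec.Command("kubectl", "apply",
		"-f", filepath.Join(dir, workload),
		"--kubeconfig="+tc.KubeConfigFile).CombinedOutput()
	return string(out), err
}
```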
|
@ -4,9 +4,9 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/k3s-io/k3s/tests"
|
||||
"github.com/k3s-io/k3s/tests/e2e"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -26,92 +26,75 @@ func Test_E2EWasm(t *testing.T) {
|
|||
RunSpecs(t, "Run WebAssenbly Workloads Test Suite", suiteConfig, reporterConfig)
|
||||
}
|
||||
|
||||
var (
|
||||
kubeConfigFile string
|
||||
serverNodeNames []string
|
||||
agentNodeNames []string
|
||||
)
|
||||
var tc *e2e.TestConfig
|
||||
|
||||
var _ = ReportAfterEach(e2e.GenReport)
|
||||
|
||||
var _ = Describe("Verify Can run Wasm workloads", Ordered, func() {
|
||||
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
fmt.Println("CLUSTER CONFIG")
|
||||
fmt.Println("OS:", *nodeOS)
|
||||
fmt.Println("Server Nodes:", serverNodeNames)
|
||||
fmt.Println("Agent Nodes:", agentNodeNames)
|
||||
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Server node needs to be ready before we continue
|
||||
It("Checks Node and Pod Status", func() {
|
||||
fmt.Printf("\nFetching node status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
var _ = Describe("Verify K3s can run Wasm workloads", Ordered, func() {
|
||||
Context("Cluster comes up with Wasm configuration", func() {
|
||||
It("Starts up with no issues", func() {
|
||||
var err error
|
||||
if *local {
|
||||
tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
|
||||
} else {
|
||||
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParseNodes(kubeConfigFile, true)
|
||||
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
|
||||
By("CLUSTER CONFIG")
|
||||
By("OS: " + *nodeOS)
|
||||
By(tc.Status())
|
||||
})
|
||||
|
||||
fmt.Printf("\nFetching Pods status\n")
|
||||
Eventually(func(g Gomega) {
|
||||
pods, err := e2e.ParsePods(kubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods {
|
||||
if strings.Contains(pod.Name, "helm-install") {
|
||||
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
|
||||
} else {
|
||||
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
|
||||
It("Checks node and pod status", func() {
|
||||
By("Fetching Nodes status")
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
for _, node := range nodes {
|
||||
g.Expect(node.Status).Should(Equal("Ready"))
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
|
||||
By("Fetching pod status")
|
||||
Eventually(func() error {
|
||||
return tests.AllPodsUp(tc.KubeConfigFile)
|
||||
}, "620s", "10s").Should(Succeed())
|
||||
})
|
||||
|
||||
It("Verify wasm-related containerd shims are installed", func() {
|
||||
expected_shims := []string{"containerd-shim-spin-v2", "containerd-shim-slight-v1"}
|
||||
for _, node := range append(tc.Servers, tc.Agents...) {
|
||||
for _, shim := range expected_shims {
|
||||
cmd := fmt.Sprintf("which %s", shim)
|
||||
_, err := node.RunCmdOnNode(cmd)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
}, "620s", "5s").Should(Succeed())
|
||||
_, _ = e2e.ParsePods(kubeConfigFile, true)
|
||||
})
|
||||
|
||||
It("Verify wasm-related containerd shims are installed", func() {
|
||||
expected_shims := []string{"containerd-shim-spin-v2", "containerd-shim-slight-v1"}
|
||||
for _, node := range append(serverNodeNames, agentNodeNames...) {
|
||||
for _, shim := range expected_shims {
|
||||
cmd := fmt.Sprintf("which %s", shim)
|
||||
_, err := e2e.RunCmdOnNode(cmd, node)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("Verify Wasm workloads can run on the cluster", func() {
|
||||
It("Deploy Wasm workloads", func() {
|
||||
out, err := e2e.DeployWorkload("wasm-workloads.yaml", kubeConfigFile, false)
|
||||
out, err := tc.DeployWorkload("wasm-workloads.yaml")
|
||||
Expect(err).NotTo(HaveOccurred(), out)
|
||||
})
|
||||
|
||||
It("Wait for slight Pod to be up and running", func() {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods -o=name -l app=wasm-slight --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l app=wasm-slight --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "240s", "5s").Should(ContainSubstring("pod/wasm-slight"))
|
||||
})
|
||||
|
||||
It("Wait for spin Pod to be up and running", func() {
|
||||
Eventually(func() (string, error) {
|
||||
cmd := "kubectl get pods -o=name -l app=wasm-spin --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
|
||||
cmd := "kubectl get pods -o=name -l app=wasm-spin --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
|
||||
return e2e.RunCommand(cmd)
|
||||
}, "120s", "5s").Should(ContainSubstring("pod/wasm-spin"))
|
||||
})
|
||||
|
||||
It("Interact with Wasm applications", func() {
|
||||
ingressIPs, err := e2e.FetchIngressIP(kubeConfigFile)
|
||||
ingressIPs, err := e2e.FetchIngressIP(tc.KubeConfigFile)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ingressIPs).To(HaveLen(1))
|
||||
|
||||
|
@ -136,12 +119,12 @@ var _ = AfterEach(func() {
|
|||
|
||||
var _ = AfterSuite(func() {
|
||||
if failed {
|
||||
Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
} else {
|
||||
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
|
||||
Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed())
|
||||
}
|
||||
if !failed || *ci {
|
||||
Expect(e2e.DestroyCluster()).To(Succeed())
|
||||
Expect(os.Remove(kubeConfigFile)).To(Succeed())
|
||||
Expect(os.Remove(tc.KubeConfigFile)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
|
|
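In the integration suites that follow, the package-local testutil.K3sDefaultDeployments() readiness check is replaced by the shared tests.CheckDefaultDeployments(testutil.DefaultConfig). A caller-side sketch of the pattern (the package name and spec wrapper are illustrative; the timings mirror the hunks below):

```go
package integration

import (
	tests "github.com/k3s-io/k3s/tests"
	testutil "github.com/k3s-io/k3s/tests/integration"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// The suites poll the consolidated deployment check until coredns,
// local-path-provisioner, metrics-server, and traefik are all ready.
var _ = Describe("example usage", func() {
	It("waits for the default deployments", func() {
		Eventually(func() error {
			return tests.CheckDefaultDeployments(testutil.DefaultConfig)
		}, "180s", "5s").Should(Succeed())
	})
})
```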
@ -5,6 +5,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
tests "github.com/k3s-io/k3s/tests"
|
||||
testutil "github.com/k3s-io/k3s/tests/integration"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -36,7 +37,7 @@ var _ = Describe("ca certificate rotation", Ordered, func() {
|
|||
When("a new server is created", func() {
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "5s").Should(Succeed())
|
||||
})
|
||||
It("get certificate hash", func() {
|
||||
|
@ -69,7 +70,7 @@ var _ = Describe("ca certificate rotation", Ordered, func() {
|
|||
})
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
})
|
||||
It("get certificate hash", func() {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
tests "github.com/k3s-io/k3s/tests"
|
||||
testutil "github.com/k3s-io/k3s/tests/integration"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -35,7 +36,7 @@ var _ = Describe("certificate rotation", Ordered, func() {
|
|||
When("a new server is created", func() {
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "5s").Should(Succeed())
|
||||
})
|
||||
It("get certificate hash", func() {
|
||||
|
@ -61,7 +62,7 @@ var _ = Describe("certificate rotation", Ordered, func() {
|
|||
})
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
})
|
||||
It("checks the certificate status", func() {
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
tests "github.com/k3s-io/k3s/tests"
|
||||
testutil "github.com/k3s-io/k3s/tests/integration"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -40,7 +41,7 @@ var _ = Describe("dual stack", Ordered, func() {
|
|||
When("a ipv4 and ipv6 cidr is present", func() {
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "10s").Should(Succeed())
|
||||
})
|
||||
It("creates pods with two IPs", func() {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
tests "github.com/k3s-io/k3s/tests"
|
||||
testutil "github.com/k3s-io/k3s/tests/integration"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -33,7 +34,7 @@ var _ = Describe("etcd snapshot restore", Ordered, func() {
|
|||
When("a snapshot is restored on existing node", func() {
|
||||
It("etcd starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "5s").Should(Succeed())
|
||||
})
|
||||
It("create a workload", func() {
|
||||
|
@ -85,7 +86,7 @@ var _ = Describe("etcd snapshot restore", Ordered, func() {
|
|||
})
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "360s", "5s").Should(Succeed())
|
||||
})
|
||||
It("make sure workload 1 exists", func() {
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
tests "github.com/k3s-io/k3s/tests"
|
||||
testutil "github.com/k3s-io/k3s/tests/integration"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -40,7 +41,7 @@ var _ = Describe("etcd snapshots", Ordered, func() {
|
|||
When("a new etcd is created", func() {
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "10s").Should(Succeed())
|
||||
})
|
||||
It("saves an etcd snapshot", func() {
|
||||
|
@ -130,7 +131,7 @@ var _ = Describe("etcd snapshots", Ordered, func() {
|
|||
server, err = testutil.K3sStartServer(localServerArgs...)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "5s").Should(Succeed())
|
||||
|
||||
})
|
||||
|
@ -156,7 +157,7 @@ var _ = Describe("etcd snapshots", Ordered, func() {
|
|||
server, err = testutil.K3sStartServer(localServerArgs...)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "5s").Should(Succeed())
|
||||
|
||||
})
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
tests "github.com/k3s-io/k3s/tests"
|
||||
testutil "github.com/k3s-io/k3s/tests/integration"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -41,7 +42,7 @@ var _ = Describe("flannel-ipv6-masq", Ordered, func() {
|
|||
When("a ipv4 and ipv6 cidr is present", func() {
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() error {
|
||||
return testutil.K3sDefaultDeployments()
|
||||
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
|
||||
}, "180s", "10s").Should(Succeed())
|
||||
})
|
||||
It("creates pods with two IPs", func() {
|
||||
|
|
|
@ -1,5 +1,5 @@
/*
This test verifies that even if we use flannel-backend=none, kube-api starts correctly so that it can
This test verifies that even if we use flannel-backend=none, kube-api starts correctly so that it can
accept the custom CNI plugin manifest. We want to catch regressions in which kube-api is unresponsive.
To do so we check for 25s that we can consistently query kube-api. We check that pods are in pending
state, which is what should happen if there is no cni plugin
@ -14,8 +14,6 @@ import (
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var server *testutil.K3sServer
@ -44,25 +42,24 @@ var _ = Describe("flannel-backend=none", Ordered, func() {
It("checks pods status", func() {
// Wait for pods to come up before running the real test
Eventually(func() int {
pods, _ := testutil.ParsePods("kube-system", metav1.ListOptions{})
pods, _ := testutil.ParsePodsInNS("kube-system")
return len(pods)
}, "180s", "5s").Should(BeNumerically(">", 0))


pods, err := testutil.ParsePods("kube-system", metav1.ListOptions{})
pods, err := testutil.ParsePodsInNS("kube-system")
Expect(err).NotTo(HaveOccurred())

// Pods should remain in Pending state because there is no network plugin
Consistently(func () bool {
Consistently(func() bool {
for _, pod := range pods {
if !strings.Contains(string(pod.Status.Phase), "Pending") {
return false
}
}
return true
}, "25s").Should(BeTrue())
})
}, "25s").Should(BeTrue())
})
})
})

var failed bool
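Note the signature change in this file: testutil.ParsePods(namespace, metav1.ListOptions{}) becomes testutil.ParsePodsInNS(namespace), which is why the metav1 import is removed from the import block above. Callers that previously relied on ListOptions can filter the returned slice instead; a hedged sketch, where the helper name and label key are made up for illustration:

package example

import (
	testutil "github.com/k3s-io/k3s/tests/integration"
	corev1 "k8s.io/api/core/v1"
)

// PodsWithLabel is an illustrative helper, not part of this change set: it lists
// pods in a namespace through the new ParsePodsInNS helper and filters them
// client-side, which is how callers can replace the old ListOptions-based filtering.
func PodsWithLabel(namespace, key, value string) ([]corev1.Pod, error) {
	pods, err := testutil.ParsePodsInNS(namespace)
	if err != nil {
		return nil, err
	}
	var matched []corev1.Pod
	for _, pod := range pods {
		if pod.Labels[key] == value {
			matched = append(matched, pod)
		}
	}
	return matched, nil
}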
@ -15,20 +15,20 @@ import (
"time"

"github.com/k3s-io/k3s/pkg/flock"
"github.com/k3s-io/k3s/tests"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)

// Compile-time variable
var existingServer = "False"

const lockFile = "/tmp/k3s-test.lock"
const DefaultConfig = "/etc/rancher/k3s/k3s.yaml"

type K3sServer struct {
cmd *exec.Cmd
@ -128,60 +128,8 @@ func K3sServerArgs() []string {
return args
}

// K3sDefaultDeployments checks if the default deployments for K3s are ready, otherwise returns an error
func K3sDefaultDeployments() error {
return CheckDeployments(metav1.NamespaceSystem, []string{"coredns", "local-path-provisioner", "metrics-server", "traefik"})
}

// CheckDeployments checks if the provided list of deployments are ready, otherwise returns an error
func CheckDeployments(namespace string, deployments []string) error {
client, err := k8sClient()
if err != nil {
return err
}

for _, deploymentName := range deployments {
deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
if deployment.Status.ReadyReplicas != deployment.Status.Replicas || deployment.Status.AvailableReplicas != deployment.Status.Replicas {
return fmt.Errorf("deployment %s not ready: replicas=%d readyReplicas=%d availableReplicas=%d",
deploymentName, deployment.Status.Replicas, deployment.Status.ReadyReplicas, deployment.Status.AvailableReplicas)
}
}

return nil
}

func ParsePods(namespace string, opts metav1.ListOptions) ([]corev1.Pod, error) {
clientSet, err := k8sClient()
if err != nil {
return nil, err
}
pods, err := clientSet.CoreV1().Pods(namespace).List(context.Background(), opts)
if err != nil {
return nil, err
}

return pods.Items, nil
}

func ParseNodes() ([]corev1.Node, error) {
clientSet, err := k8sClient()
if err != nil {
return nil, err
}
nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}

return nodes.Items, nil
}

func GetPod(namespace, name string) (*corev1.Pod, error) {
client, err := k8sClient()
client, err := tests.K8sClient(DefaultConfig)
if err != nil {
return nil, err
}
@ -189,7 +137,7 @@ func GetPod(namespace, name string) (*corev1.Pod, error) {
}

func GetPersistentVolumeClaim(namespace, name string) (*corev1.PersistentVolumeClaim, error) {
client, err := k8sClient()
client, err := tests.K8sClient(DefaultConfig)
if err != nil {
return nil, err
}
@ -197,7 +145,7 @@ func GetPersistentVolumeClaim(namespace, name string) (*corev1.PersistentVolumeC
}

func GetPersistentVolume(name string) (*corev1.PersistentVolume, error) {
client, err := k8sClient()
client, err := tests.K8sClient(DefaultConfig)
if err != nil {
return nil, err
}
@ -355,7 +303,7 @@ func K3sSaveLog(server *K3sServer, dump bool) error {
}

func GetEndpointsAddresses() (string, error) {
client, err := k8sClient()
client, err := tests.K8sClient(DefaultConfig)
if err != nil {
return "", err
}
@ -414,14 +362,15 @@ func unmountFolder(folder string) error {
return nil
}

func k8sClient() (*kubernetes.Clientset, error) {
config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
func ParsePodsInNS(namespace string) ([]corev1.Pod, error) {
clientSet, err := tests.K8sClient(DefaultConfig)
if err != nil {
return nil, err
}
clientSet, err := kubernetes.NewForConfig(config)
pods, err := clientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
return clientSet, nil

return pods.Items, nil
}
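With the local k8sClient() removed, every helper left in this file builds its clientset through tests.K8sClient, passing the DefaultConfig kubeconfig path instead of hard-coding it. The shared helper itself is not shown in these hunks; based on the removed code it presumably amounts to something like the sketch below, an assumption rather than the verbatim implementation:

package tests

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// Assumed shape of the consolidated client helper: build a clientset from an
// explicit kubeconfig path rather than the previously hard-coded one.
func K8sClient(kubeconfigFile string) (*kubernetes.Clientset, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}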
@ -4,6 +4,7 @@ import (
"strings"
"testing"

tests "github.com/k3s-io/k3s/tests"
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -126,7 +127,7 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {

// Pods should not be healthy without kube-proxy
Consistently(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "100s", "5s").Should(HaveOccurred())
})
It("should not find kube-proxy starting", func() {
@ -178,7 +179,7 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {
Expect(err).ToNot(HaveOccurred())

Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "180s", "5s").Should(Succeed())

})
@ -7,6 +7,7 @@ import (
"strings"
"testing"

tests "github.com/k3s-io/k3s/tests"
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -35,7 +36,7 @@ var _ = Describe("local storage", Ordered, func() {
When("a new local storage is created", func() {
It("starts up with no problems", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
})
It("creates a new pvc", func() {
@ -6,10 +6,10 @@ import (
"strings"
"testing"

tests "github.com/k3s-io/k3s/tests"
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var server *testutil.K3sServer
@ -38,7 +38,7 @@ var _ = Describe("longhorn", Ordered, func() {
When("a new cluster is created", func() {
It("starts up with no problems", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
})
})
@ -57,7 +57,7 @@ var _ = Describe("longhorn", Ordered, func() {
})
It("starts the longhorn pods with no problems", func() {
Eventually(func() error {
pods, err := testutil.ParsePods("longhorn-system", metav1.ListOptions{})
pods, err := testutil.ParsePodsInNS("longhorn-system")
if err != nil {
return err
}
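The truncated hunk above polls the longhorn-system pods through the new namespaced helper. For context, a complete readiness check built on that helper might look like the following; this is illustrative only and is not the code cut off in this diff:

package example

import (
	"fmt"

	testutil "github.com/k3s-io/k3s/tests/integration"
	corev1 "k8s.io/api/core/v1"
)

// NamespaceRunning is an illustrative check built on the new namespaced helper:
// it returns an error unless every pod in the namespace has reached Running or
// Succeeded, the kind of condition an Eventually block like the one above polls for.
func NamespaceRunning(namespace string) error {
	pods, err := testutil.ParsePodsInNS(namespace)
	if err != nil {
		return err
	}
	if len(pods) == 0 {
		return fmt.Errorf("no pods found in namespace %s", namespace)
	}
	for _, pod := range pods {
		if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded {
			return fmt.Errorf("pod %s is %s", pod.Name, pod.Status.Phase)
		}
	}
	return nil
}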
@ -6,6 +6,7 @@ import (
"testing"
"time"

tests "github.com/k3s-io/k3s/tests"
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -35,7 +36,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() {
When("A server starts with secrets encryption", func() {
It("starts up with no problems", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "180s", "5s").Should(Succeed())
})
It("it creates a encryption key", func() {
@ -66,7 +67,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() {
secretsEncryptionServer, err = testutil.K3sStartServer(secretsEncryptionServerArgs...)
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "180s", "5s").Should(Succeed())
Eventually(func() (string, error) {
return testutil.K3sCmd("secrets-encrypt status -d", secretsEncryptionDataDir)
@ -91,7 +92,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() {
secretsEncryptionServer, err = testutil.K3sStartServer(secretsEncryptionServerArgs...)
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "180s", "5s").Should(Succeed())

Eventually(func() (string, error) {
@ -128,7 +129,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() {
secretsEncryptionServer, err = testutil.K3sStartServer(secretsEncryptionServerArgs...)
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "180s", "5s").Should(Succeed())
time.Sleep(10 * time.Second)
})
@ -6,6 +6,7 @@ import (
"path/filepath"
"testing"

tests "github.com/k3s-io/k3s/tests"
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -37,7 +38,7 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
})
It("has kine without tls", func() {
@ -78,7 +79,7 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
})
It("set kine to use tls", func() {
@ -107,7 +108,7 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
})
It("dies cleanly", func() {
@ -124,9 +125,9 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods without traefik deployed", func() {
Eventually(func() error {
return testutil.CheckDeployments("kube-system", []string{"coredns", "local-path-provisioner", "metrics-server"})
return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server"}, testutil.DefaultConfig)
}, "90s", "10s").Should(Succeed())
nodes, err := testutil.ParseNodes()
nodes, err := tests.ParseNodes(testutil.DefaultConfig)
Expect(err).NotTo(HaveOccurred())
Expect(nodes).To(HaveLen(1))
})
@ -156,10 +157,10 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the node deployed with correct IPs", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "10s").Should(Succeed())

nodes, err := testutil.ParseNodes()
nodes, err := tests.ParseNodes(testutil.DefaultConfig)
Expect(err).NotTo(HaveOccurred())
Expect(nodes).To(HaveLen(1))
Expect(nodes[0].Status.Addresses).To(ContainElements([]v1.NodeAddress{
@ -201,9 +202,9 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
nodes, err := testutil.ParseNodes()
nodes, err := tests.ParseNodes(testutil.DefaultConfig)
Expect(err).NotTo(HaveOccurred())
Expect(nodes).To(HaveLen(1))
})
@ -229,16 +230,16 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
nodes, err := testutil.ParseNodes()
nodes, err := tests.ParseNodes(testutil.DefaultConfig)
Expect(err).NotTo(HaveOccurred())
Expect(nodes).To(HaveLen(1))
})
var nodes []v1.Node
It("has a custom node name with id appended", func() {
var err error
nodes, err = testutil.ParseNodes()
nodes, err = tests.ParseNodes(testutil.DefaultConfig)
Expect(err).NotTo(HaveOccurred())
Expect(nodes).To(HaveLen(1))
Expect(nodes[0].Name).To(MatchRegexp(`-[0-9a-f]*`))
@ -264,9 +265,9 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
nodes, err := testutil.ParseNodes()
nodes, err := tests.ParseNodes(testutil.DefaultConfig)
Expect(err).NotTo(HaveOccurred())
Expect(nodes).To(HaveLen(1))
})
@ -285,7 +286,7 @@ var _ = Describe("startup tests", Ordered, func() {
})
It("has the default pods deployed", func() {
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "120s", "5s").Should(Succeed())
})
It("creates a new pod", func() {
@ -301,7 +302,7 @@ var _ = Describe("startup tests", Ordered, func() {
startupServer, err = testutil.K3sStartServer(startupServerArgs...)
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return testutil.K3sDefaultDeployments()
return tests.CheckDefaultDeployments(testutil.DefaultConfig)
}, "180s", "5s").Should(Succeed())
})
It("has the dummy pod not restarted", func() {
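Taken together, the startup hunks above show the surface of the consolidated package as these tests now use it: CheckDefaultDeployments, CheckDeployments with a custom list (the no-traefik case), and ParseNodes, all keyed off an explicit kubeconfig path. A small standalone usage sketch outside Ginkgo, where the kubeconfig path is the only assumption:

package main

import (
	"fmt"
	"log"

	tests "github.com/k3s-io/k3s/tests"
)

func main() {
	// The integration tests pass testutil.DefaultConfig; any kubeconfig path works here.
	kubeconfig := "/etc/rancher/k3s/k3s.yaml"

	// Check a custom deployment list, mirroring the no-traefik startup case.
	if err := tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server"}, kubeconfig); err != nil {
		log.Fatalf("deployments not ready: %v", err)
	}

	// List the cluster nodes, as the startup specs do after the readiness check.
	nodes, err := tests.ParseNodes(kubeconfig)
	if err != nil {
		log.Fatalf("failed to list nodes: %v", err)
	}
	fmt.Printf("cluster has %d node(s)\n", len(nodes))
}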