Expand docker upgrade test, sunset E2E upgrade test

Signed-off-by: Derek Nola <derek.nola@suse.com>
pull/13369/head
Derek Nola 2025-12-02 12:14:32 -08:00
parent a6c6cd15c0
commit f6a3f0c8b5
12 changed files with 316 additions and 573 deletions

View File

@ -5,6 +5,7 @@ import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
@ -66,6 +67,16 @@ func CheckDeployments(kubeconfigFile, namespace string, deployments ...string) e
return nil
}
// GetDaemonsetReady returns the number of ready pods for the given daemonset
// in the default namespace, as reported by .status.numberReady.
//
// NOTE(review): the jsonpath opens `{range .items[*]}` without a matching
// `{end}` even though a single named daemonset is requested — kubectl
// appears to tolerate this, but the canonical expression for a single
// object would be `{.status.numberReady}`; confirm before changing.
func GetDaemonsetReady(daemonset string, kubeConfigFile string) (int, error) {
	cmd := "kubectl get ds " + daemonset + " -o jsonpath='{range .items[*]}{.status.numberReady}' --kubeconfig=" + kubeConfigFile
	out, err := RunCommand(cmd)
	if err != nil {
		return 0, err
	}
	// Trim any surrounding whitespace/newline from the command output so
	// strconv.Atoi does not fail on shell-decorated results.
	return strconv.Atoi(strings.TrimSpace(out))
}
func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) {
clientSet, err := K8sClient(kubeconfigFile)
if err != nil {

View File

@ -80,10 +80,9 @@ kubelet-arg:
It("applies network policies", func() {
_, err := config.DeployWorkload("hardened-ingress.yaml")
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get daemonset -n default example -o jsonpath='{.status.numberReady}'"
return tests.RunCommand(cmd)
}, "60s", "5s").Should(Equal("2"))
Eventually(func() (int, error) {
return tests.GetDaemonsetReady("example", config.KubeconfigFile)
}, "60s", "5s").Should(Equal(2))
_, err = config.DeployWorkload("hardened-netpool.yaml")
Expect(err).NotTo(HaveOccurred())
})

View File

@ -0,0 +1,18 @@
# DaemonSet used by the upgrade/create tests: schedules one nginx pod per
# eligible node so tests can compare ready-pod count against node count.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: test-daemonset
spec:
  selector:
    matchLabels:
      k8s-app: test-daemonset
  template:
    metadata:
      labels:
        k8s-app: test-daemonset
    spec:
      containers:
      - name: webserver
        # Mirrored image keeps pulls working in air-gapped/CI registries.
        image: rancher/mirrored-library-nginx:1.29.1-alpine
        ports:
        - containerPort: 80

View File

@ -0,0 +1,14 @@
# Long-lived utility pod used by the "Verifies dns access" tests to run
# `nslookup kubernetes.default` against in-cluster DNS.
apiVersion: v1
kind: Pod
metadata:
  name: dnsutils
  namespace: default
spec:
  containers:
  - name: dnsutils
    image: registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7
    # Keep the pod alive for an hour so tests can exec into it.
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always

View File

@ -0,0 +1,52 @@
# Ingress routing host foo1.bar.com to the backing nginx-ingress-svc; tests
# curl a node IP with a `Host: foo1.bar.com` header and expect "test-ingress".
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress
spec:
  rules:
  - host: foo1.bar.com
    http:
      paths:
      - backend:
          service:
            name: nginx-ingress-svc
            port:
              number: 80
        path: /
        pathType: ImplementationSpecific
---
# ClusterIP service fronting the test-ingress pods (selected by label).
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress-svc
  labels:
    k8s-app: nginx-app-ingress
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
    name: http
  selector:
    k8s-app: nginx-app-ingress
---
# Two busybox-based web servers; each writes its hostname to name.html so
# responses identify the serving pod ("test-ingress-...").
apiVersion: v1
kind: ReplicationController
metadata:
  name: test-ingress
spec:
  replicas: 2
  selector:
    k8s-app: nginx-app-ingress
  template:
    metadata:
      labels:
        k8s-app: nginx-app-ingress
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: testcontainer
        image: rancher/mirrored-library-busybox:1.37.0
        # busybox httpd serves the files written by this shell one-liner.
        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80

View File

@ -242,12 +242,7 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error {
}
// Get the IP address of the container
var cmd string
if config.DualStack {
cmd = "docker inspect --format '{{range $k,$v := .NetworkSettings.Networks}}{{printf \"%s\" $v.IPAddress}}{{end}}' " + name
} else {
cmd = "docker inspect --format '{{ .NetworkSettings.IPAddress }}' " + name
}
cmd := "docker inspect --format '{{range $k,$v := .NetworkSettings.Networks}}{{printf \"%s\" $v.IPAddress}}{{end}}' " + name
ipOutput, err := tests.RunCommand(cmd)
if err != nil {
return fmt.Errorf("failed to get container IP address: %s: %v", ipOutput, err)
@ -397,12 +392,7 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error {
}
// Get the IP address of the container
var cmd string
if config.DualStack {
cmd = "docker inspect --format '{{range $k,$v := .NetworkSettings.Networks}}{{printf \"%s\" $v.IPAddress}}{{end}}' " + name
} else {
cmd = "docker inspect --format '{{ .NetworkSettings.IPAddress }}' " + name
}
cmd := "docker inspect --format '{{range $k,$v := .NetworkSettings.Networks}}{{printf \"%s\" $v.IPAddress}}{{end}}' " + name
ipOutput, err := tests.RunCommand(cmd)
if err != nil {
return err

View File

@ -20,9 +20,9 @@ import (
var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
var channel = flag.String("channel", "latest", "The release channel to test")
var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
var config *docker.TestConfig
var tc *docker.TestConfig
var numServers = 1
var numServers = 3
var numAgents = 1
func Test_DockerUpgrade(t *testing.T) {
@ -51,8 +51,8 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
})
It("should setup environment", func() {
var err error
config, err = docker.NewTestConfig("rancher/k3s:" + latestVersion)
testID := filepath.Base(config.TestDir)
tc, err = docker.NewTestConfig("rancher/k3s:" + latestVersion)
testID := filepath.Base(tc.TestDir)
Expect(err).NotTo(HaveOccurred())
for i := 0; i < numServers; i++ {
m1 := fmt.Sprintf("--mount type=volume,src=server-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID)
@ -68,59 +68,165 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
}
})
It("should provision servers and agents", func() {
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Expect(tc.ProvisionServers(numServers)).To(Succeed())
Expect(tc.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tests.CheckDefaultDeployments(config.KubeconfigFile)
return tests.CheckDefaultDeployments(tc.KubeconfigFile)
}, "120s", "5s").Should(Succeed())
})
It("should confirm latest version", func() {
for _, server := range config.Servers {
for _, server := range tc.Servers {
out, err := server.RunCmdOnNode("k3s --version")
Expect(err).NotTo(HaveOccurred())
Expect(out).To(ContainSubstring(strings.Replace(latestVersion, "-", "+", 1)))
}
})
})
Context("Validates resource functionality", func() {
It("should deploy a test pod", func() {
_, err := config.DeployWorkload("volume-test.yaml")
_, err := tc.DeployWorkload("volume-test.yaml")
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
Eventually(func() (bool, error) {
return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile)
return tests.PodReady("volume-test", "kube-system", tc.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("Verifies ClusterIP Service", func() {
_, err := tc.DeployWorkload("clusterip.yaml")
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
cmd = "kubectl get svc nginx-clusterip-svc -o jsonpath='{.spec.clusterIP}'"
clusterip, _ := tests.RunCommand(cmd)
cmd = "wget -T 5 -O - -q http://" + clusterip + "/name.html"
for _, node := range tc.Servers {
Eventually(func() (string, error) {
return node.RunCmdOnNode(cmd)
}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
}
})
It("Verifies NodePort Service", func() {
_, err := tc.DeployWorkload("nodeport.yaml")
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
for _, node := range tc.Servers {
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := tests.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
cmd = "curl -m 5 -s -f http://" + node.IP + ":" + nodeport + "/name.html"
fmt.Println(cmd)
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
}
})
It("Verifies LoadBalancer Service", func() {
_, err := tc.DeployWorkload("loadbalancer-allTraffic.yaml")
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
for _, node := range tc.Servers {
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tests.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
cmd = "curl -m 5 -s -f http://" + node.IP + ":" + port + "/ip"
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("10.42"), "failed cmd: "+cmd)
}
})
It("Verifies Ingress", func() {
_, err := tc.DeployWorkload("ingress.yaml")
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
for _, node := range tc.Servers {
cmd := "curl --header host:foo1.bar.com -m 5 -s -f http://" + node.IP + "/name.html"
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd)
}
})
It("Verifies Daemonset", func() {
_, err := tc.DeployWorkload("daemonset.yaml")
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
nodes, _ := tests.ParseNodes(tc.KubeconfigFile)
Eventually(func(g Gomega) {
count, err := tests.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
}, "240s", "10s").Should(Succeed())
})
It("Verifies dns access", func() {
_, err := tc.DeployWorkload("dnsutils.yaml")
Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
Eventually(func() (string, error) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeconfigFile
return tests.RunCommand(cmd)
}, "420s", "2s").Should(ContainSubstring("dnsutils"))
cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
})
})
Context("Upgrade to Current Commit Build", func() {
It("should upgrade to current commit build", func() {
By("Remove old servers and agents")
for _, server := range config.Servers {
for _, server := range tc.Servers {
cmd := fmt.Sprintf("docker stop %s", server.Name)
Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm %s", server.Name)
Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
fmt.Printf("Stopped %s\n", server.Name)
}
config.Servers = nil
tc.Servers = nil
for _, agent := range config.Agents {
for _, agent := range tc.Agents {
cmd := fmt.Sprintf("docker stop %s", agent.Name)
Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm %s", agent.Name)
Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
}
config.Agents = nil
tc.Agents = nil
config.K3sImage = *k3sImage
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
tc.K3sImage = *k3sImage
Expect(tc.ProvisionServers(numServers)).To(Succeed())
Expect(tc.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tests.CheckDefaultDeployments(config.KubeconfigFile)
return tests.CheckDefaultDeployments(tc.KubeconfigFile)
}, "120s", "5s").Should(Succeed())
})
It("should confirm commit version", func() {
for _, server := range config.Servers {
Expect(docker.VerifyValidVersion(server, "kubectl")).To(Succeed())
Expect(docker.VerifyValidVersion(server, "ctr")).To(Succeed())
Expect(docker.VerifyValidVersion(server, "crictl")).To(Succeed())
for _, server := range tc.Servers {
Eventually(func(g Gomega) {
g.Expect(docker.VerifyValidVersion(server, "kubectl")).To(Succeed())
g.Expect(docker.VerifyValidVersion(server, "ctr")).To(Succeed())
g.Expect(docker.VerifyValidVersion(server, "crictl")).To(Succeed())
}).Should(Succeed())
out, err := server.RunCmdOnNode("k3s --version")
Expect(err).NotTo(HaveOccurred())
@ -132,12 +238,92 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
Expect(out).To(ContainSubstring(cVersion))
}
})
})
Context("Validates resource functionality post-upgrade", func() {
It("should confirm test pod is still Running", func() {
Eventually(func() (bool, error) {
return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile)
return tests.PodReady("volume-test", "kube-system", tc.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("Verifies ClusterIP Service", func() {
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
return tests.RunCommand(cmd)
}, "420s", "5s").Should(ContainSubstring("test-clusterip"))
cmd := "kubectl get svc nginx-clusterip-svc -o jsonpath='{.spec.clusterIP}'"
clusterip, _ := tests.RunCommand(cmd)
cmd = "wget -T 5 -O - -q http://" + clusterip + "/name.html"
fmt.Println(cmd)
for _, node := range tc.Servers {
Eventually(func() (string, error) {
return node.RunCmdOnNode(cmd)
}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
}
})
It("Verifies NodePort Service", func() {
for _, node := range tc.Servers {
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := tests.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
cmd = "curl -m 5 -s -f http://" + node.IP + ":" + nodeport + "/name.html"
fmt.Println(cmd)
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-nodeport"))
}
})
It("Verifies LoadBalancer Service", func() {
for _, node := range tc.Servers {
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tests.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "curl -m 5 -s -f http://" + node.IP + ":" + port + "/ip"
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("10.42"))
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
}
})
It("Verifies Ingress", func() {
for _, node := range tc.Servers {
cmd := "curl --header host:foo1.bar.com -m 5 -s -f http://" + node.IP + "/name.html"
fmt.Println(cmd)
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "420s", "5s").Should(ContainSubstring("test-ingress"))
}
})
It("Verifies Daemonset", func() {
nodes, _ := tests.ParseNodes(tc.KubeconfigFile)
Eventually(func(g Gomega) {
count, err := tests.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
}, "240s", "10s").Should(Succeed())
})
It("Verifies dns access", func() {
Eventually(func() (string, error) {
cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
return tests.RunCommand(cmd)
}, "180s", "2s").Should((ContainSubstring("kubernetes.default.svc.cluster.local")))
})
})
})
@ -148,11 +334,11 @@ var _ = AfterEach(func() {
var _ = AfterSuite(func() {
if failed {
AddReportEntry("describe", docker.DescribeNodesAndPods(config))
AddReportEntry("describe", docker.DescribeNodesAndPods(tc))
AddReportEntry("docker-containers", docker.ListContainers())
AddReportEntry("docker-logs", docker.TailDockerLogs(1000, append(config.Servers, config.Agents...)))
AddReportEntry("docker-logs", docker.TailDockerLogs(1000, append(tc.Servers, tc.Agents...)))
}
if config != nil && (*ci || !failed) {
Expect(config.Cleanup()).To(Succeed())
if tc != nil && (*ci || !failed) {
Expect(tc.Cleanup()).To(Succeed())
}
})

View File

@ -260,7 +260,7 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) {
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
Eventually(func(g Gomega) {
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
count, err := tests.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cpNodes).To(HaveLen(count), "Daemonset pod count does not match node count")
}, "240s", "10s").Should(Succeed())

View File

@ -717,16 +717,6 @@ func GetCoverageReport(nodes []VagrantNode) error {
return nil
}
// GetDaemonsetReady returns the number of ready pods for the given daemonset
func GetDaemonsetReady(daemonset string, kubeConfigFile string) (int, error) {
cmd := "kubectl get ds " + daemonset + " -o jsonpath='{range .items[*]}{.status.numberReady}' --kubeconfig=" + kubeConfigFile
out, err := tests.RunCommand(cmd)
if err != nil {
return 0, err
}
return strconv.Atoi(out)
}
// GetPodIPs returns the IPs of all pods
func GetPodIPs(kubeConfigFile string) ([]ObjIP, error) {
cmd := `kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile

View File

@ -1,164 +0,0 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "server-2", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04'])
RELEASE_CHANNEL = (ENV['E2E_RELEASE_CHANNEL'] || "latest")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
REGISTRY = (ENV['E2E_REGISTRY'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "main")
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.11"
install_type = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
# An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"
scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
defaultOSConfigure(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH, RELEASE_CHANNEL)
vm.provision "shell", inline: "ping -c 2 k3s.io"
db_type = getDBType(role, role_num, vm)
if !REGISTRY.empty?
vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
end
addCoverageDir(vm, role, GOCOVER)
if role.include?("server") && role_num == 0
vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server "
k3s.config = <<~YAML
token: vagrant
node-external-ip: #{NETWORK_PREFIX}.100
flannel-iface: eth1
tls-san: #{NETWORK_PREFIX}.100.nip.io
#{db_type}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
elsif role.include?("server") && role_num != 0
vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server"
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
#{db_type}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if role.include?("agent")
vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
k3s.args = "agent"
k3s.config = <<~YAML
server: "https://#{NETWORK_PREFIX}.100:6443"
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
#{db_type}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
if !EXTERNAL_DB.empty?
vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
end
end
end
def getDBType(role, role_num, vm)
if EXTERNAL_DB == "mariadb"
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision "Start mariaDB", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MARIADB_ROOT_PASSWORD=e2e mariadb:11"
vm.provision "shell", inline: "echo \"Wait for mariaDB to startup\"; sleep 10"
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
elsif role.include?("server") && role_num != 0
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
end
elsif EXTERNAL_DB == "mysql"
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision "Start mysql", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MYSQL_ROOT_PASSWORD=e2e mysql:5.7"
vm.provision "shell", inline: "echo \"Wait for mysql to startup\"; sleep 10"
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
elsif role.include?("server") && role_num != 0
return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
end
elsif EXTERNAL_DB == "postgres"
if role.include?("server") && role_num == 0
dockerInstall(vm)
vm.provision "Start postgres", inline: "docker run -d -p 5432:5432 --name #{EXTERNAL_DB} -e POSTGRES_PASSWORD=e2e postgres:14-alpine"
vm.provision "shell", inline: "echo \"Wait for postgres to startup\"; sleep 10"
return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/k3s?sslmode=disable'"
elsif role.include?("server") && role_num != 0
return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/k3s?sslmode=disable'"
end
elsif ( EXTERNAL_DB == "" || EXTERNAL_DB == "etcd" )
if role.include?("server") && role_num == 0
return "cluster-init: true"
end
elsif ( EXTERNAL_DB == "none" )
# Use internal sqlite
else
puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
abort
end
return ""
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
# We replicate the default prefix, but add a timestamp to enable parallel runs and cleanup of old VMs
v.default_prefix = File.basename(Dir.getwd) + "_" + Time.now.to_i.to_s + "_"
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
NODE_ROLES.each_with_index do |role, i|
role_num = role.split("-", -1).pop.to_i
config.vm.define role do |node|
provision(node.vm, role, role_num, i)
end
end
end

View File

@ -1,353 +0,0 @@
package upgradecluster
import (
"flag"
"fmt"
"os"
"testing"
"github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS:
// bento/ubuntu-24.04, eurolinux-vagrant/rocky-8, eurolinux-vagrant/rocky-9
// opensuse/Leap-15.6.x86_64
var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "Controls which version k3s upgrades too, local binary or latest commit on main")
// Environment Variables Info:
// E2E_REGISTRY: true/false (default: false)
// Controls which K3s version is installed first
// E2E_RELEASE_VERSION=v1.23.3+k3s1
// OR
// E2E_RELEASE_CHANNEL=(commit|latest|stable), commit pulls latest commit from main
func Test_E2EUpgradeValidation(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Upgrade Cluster Test Suite", suiteConfig, reporterConfig)
}
var tc *e2e.TestConfig
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Upgrade", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
tc.Hardened = *hardened
By("CLUSTER CONFIG")
By("OS: " + *nodeOS)
By(tc.Status())
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
By("Fetching Nodes status")
Eventually(func() error {
return tests.NodesReady(tc.KubeconfigFile, e2e.VagrantSlice(tc.AllNodes()))
}, "620s", "5s").Should(Succeed())
Eventually(func() error {
return tests.AllPodsUp(tc.KubeconfigFile, "kube-system")
}, "620s", "5s").Should(Succeed())
e2e.DumpPods(tc.KubeconfigFile)
})
It("Verifies ClusterIP Service", func() {
_, err := tc.DeployWorkload("clusterip.yaml")
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
clusterip, _ := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-clusterip-svc", false)
cmd = "curl -m 5 -s -f http://" + clusterip + "/name.html"
for _, node := range tc.Servers {
Eventually(func() (string, error) {
return node.RunCmdOnNode(cmd)
}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
}
})
It("Verifies NodePort Service", func() {
_, err := tc.DeployWorkload("nodeport.yaml")
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
for _, node := range tc.Servers {
nodeExternalIP, _ := node.FetchNodeExternalIP()
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := tests.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
cmd = "curl -m 5 -s -f http://" + nodeExternalIP + ":" + nodeport + "/name.html"
fmt.Println(cmd)
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
}
})
It("Verifies LoadBalancer Service", func() {
_, err := tc.DeployWorkload("loadbalancer.yaml")
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
for _, node := range tc.Servers {
ip, _ := node.FetchNodeExternalIP()
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tests.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
cmd = "curl -m 5 -s -f http://" + ip + ":" + port + "/name.html"
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
}
})
It("Verifies Ingress", func() {
_, err := tc.DeployWorkload("ingress.yaml")
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
for _, node := range tc.Servers {
ip, _ := node.FetchNodeExternalIP()
cmd := "curl -m 5 -s -f -H 'Host: foo1.bar.com' http://" + ip + "/name.html"
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd)
}
})
It("Verifies Daemonset", func() {
_, err := tc.DeployWorkload("daemonset.yaml")
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
nodes, _ := tests.ParseNodes(tc.KubeconfigFile)
Eventually(func(g Gomega) {
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
}, "240s", "10s").Should(Succeed())
})
It("Verifies dns access", func() {
_, err := tc.DeployWorkload("dnsutils.yaml")
Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
Eventually(func() (string, error) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeconfigFile
return tests.RunCommand(cmd)
}, "420s", "2s").Should(ContainSubstring("dnsutils"))
cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
Eventually(func() (string, error) {
return tests.RunCommand(cmd)
}, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
})
// Verifies Local Path Provisioner storage: data written to a PVC-backed
// volume must survive deletion and re-creation of the pod that wrote it.
It("Verifies Local Path Provisioner storage ", func() {
	// waitForVolumeTest blocks until the volume-test pod reports Running.
	// Extracted because the identical check is needed both before writing
	// the data and after the pod is re-created.
	waitForVolumeTest := func() {
		Eventually(func(g Gomega) {
			cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeconfigFile
			res, err := tests.RunCommand(cmd)
			g.Expect(err).NotTo(HaveOccurred())
			fmt.Println(res)
			g.Expect(res).Should(ContainSubstring("volume-test"))
			g.Expect(res).Should(ContainSubstring("Running"))
		}, "420s", "2s").Should(Succeed())
	}

	_, err := tc.DeployWorkload("local-path-provisioner.yaml")
	Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
	// Wait for the PVC to bind before exercising the volume.
	Eventually(func(g Gomega) {
		cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeconfigFile
		res, err := tests.RunCommand(cmd)
		g.Expect(err).NotTo(HaveOccurred())
		fmt.Println(res)
		g.Expect(res).Should(ContainSubstring("local-path-pvc"))
		g.Expect(res).Should(ContainSubstring("Bound"))
	}, "240s", "2s").Should(Succeed())
	waitForVolumeTest()

	// Store a marker in the volume, then delete the pod that wrote it.
	cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
	res, err := tests.RunCommand(cmd)
	Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
	fmt.Println("Data stored in pvc: local-path-test")
	cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeconfigFile
	res, err = tests.RunCommand(cmd)
	Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)

	// Re-apply the manifest so the pod is re-created against the same PVC.
	_, err = tc.DeployWorkload("local-path-provisioner.yaml")
	Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
	Eventually(func() (string, error) {
		cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeconfigFile
		return tests.RunCommand(cmd)
	}, "420s", "2s").Should(ContainSubstring("local-path-provisioner"))
	waitForVolumeTest()

	// Check data after re-creation
	Eventually(func() (string, error) {
		cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeconfigFile + " -- cat /data/test"
		return tests.RunCommand(cmd)
	}, "180s", "2s").Should(ContainSubstring("local-path-test"), "Failed to retrieve data from pvc")
})
It("Upgrades with no issues", func() {
var err error
Expect(e2e.UpgradeCluster(tc.AllNodes(), *local)).To(Succeed())
Expect(e2e.RestartCluster(tc.AllNodes())).To(Succeed())
fmt.Println("CLUSTER UPGRADED")
tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name)
Expect(err).NotTo(HaveOccurred())
})
// After the upgrade, every node must report Ready and all kube-system
// pods must come back up.
It("After upgrade Checks Node and Pod Status", func() {
	By("Fetching Nodes status")
	Eventually(func() error {
		return tests.NodesReady(tc.KubeconfigFile, e2e.VagrantSlice(tc.AllNodes()))
	}, "360s", "5s").Should(Succeed())
	e2e.DumpNodes(tc.KubeconfigFile)

	By("Fetching Pod status")
	// The previous bare call discarded AllPodsUp's result, so a pod that
	// never recovered could not fail the spec; poll and assert instead.
	Eventually(func() error {
		return tests.AllPodsUp(tc.KubeconfigFile, "kube-system")
	}, "360s", "5s").Should(Succeed())
	e2e.DumpPods(tc.KubeconfigFile)
})
// After the upgrade, the ClusterIP service must still route to the nginx
// backend from every server node.
It("After upgrade verifies ClusterIP Service", func() {
	Eventually(func() (string, error) {
		cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
		return tests.RunCommand(cmd)
	}, "420s", "5s").Should(ContainSubstring("test-clusterip"))
	// The lookup error was previously discarded; a failed lookup produced
	// an empty IP and a misleading curl failure below.
	clusterip, err := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-clusterip-svc", false)
	Expect(err).NotTo(HaveOccurred())
	cmd := "curl -m 5 -s -f http://" + clusterip + "/name.html"
	fmt.Println(cmd)
	for _, node := range tc.Servers {
		Eventually(func() (string, error) {
			return node.RunCmdOnNode(cmd)
		}, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
	}
})
// After the upgrade, the NodePort service must answer on every server's
// external IP at the allocated node port.
It("After upgrade verifies NodePort Service", func() {
	for _, node := range tc.Servers {
		// The lookup error was previously discarded; assert it so a
		// missing external IP fails here rather than in the curl below.
		nodeExternalIP, err := node.FetchNodeExternalIP()
		Expect(err).NotTo(HaveOccurred())
		cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
		nodeport, err := tests.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred())
		Eventually(func() (string, error) {
			cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
			return tests.RunCommand(cmd)
		}, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
		cmd = "curl -m 5 -s -f http://" + nodeExternalIP + ":" + nodeport + "/name.html"
		fmt.Println(cmd)
		Eventually(func() (string, error) {
			return tests.RunCommand(cmd)
		}, "240s", "5s").Should(ContainSubstring("test-nodeport"))
	}
})
// After the upgrade, the LoadBalancer service must answer on every
// server's external IP at the service port.
It("After upgrade verifies LoadBalancer Service", func() {
	for _, node := range tc.Servers {
		// The lookup error was previously discarded; assert it so a
		// missing external IP fails here rather than in the curl below.
		ip, err := node.FetchNodeExternalIP()
		Expect(err).NotTo(HaveOccurred())
		cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
		port, err := tests.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred())
		// Wait for the backing pod before curling the service, so a curl
		// failure points at the service rather than a missing pod.
		Eventually(func() (string, error) {
			cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
			return tests.RunCommand(cmd)
		}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
		Eventually(func() (string, error) {
			cmd := "curl -m 5 -s -f http://" + ip + ":" + port + "/name.html"
			return tests.RunCommand(cmd)
		}, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
	}
})
// After the upgrade, the ingress route for foo1.bar.com must still be
// served from every server's external IP.
It("After upgrade verifies Ingress", func() {
	for _, node := range tc.Servers {
		// The lookup error was previously discarded; assert it so a
		// missing external IP fails here rather than in the curl below.
		ip, err := node.FetchNodeExternalIP()
		Expect(err).NotTo(HaveOccurred())
		cmd := "curl -m 5 -s -f -H 'Host: foo1.bar.com' http://" + ip + "/name.html"
		fmt.Println(cmd)
		Eventually(func() (string, error) {
			return tests.RunCommand(cmd)
		}, "420s", "5s").Should(ContainSubstring("test-ingress"))
	}
})
// After the upgrade, the daemonset must have one ready pod per node.
It("After upgrade verifies Daemonset", func() {
	nodes, err := tests.ParseNodes(tc.KubeconfigFile)
	Expect(err).NotTo(HaveOccurred())
	Eventually(func(g Gomega) {
		// Use the shared tests.GetDaemonsetReady helper; the e2e copy is
		// superseded by the helper that moved into the tests package.
		count, err := tests.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
	}, "240s", "10s").Should(Succeed())
})
// After the upgrade, in-cluster DNS must still resolve the kubernetes
// service from the dnsutils pod.
It("After upgrade verifies dns access", func() {
	lookup := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
	Eventually(func() (string, error) {
		return tests.RunCommand(lookup)
	}, "180s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
})
// Data written to the local-path volume before the upgrade must still be
// readable afterwards.
It("After upgrade verify Local Path Provisioner storage ", func() {
	Eventually(func() (string, error) {
		readCmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeconfigFile + " -- cat /data/test"
		return tests.RunCommand(readCmd)
	}, "180s", "2s").Should(ContainSubstring("local-path-test"))
})
})
})
// failed records whether any spec in this suite has failed; AfterSuite
// uses it to decide between log collection and coverage/teardown.
var failed bool
var _ = AfterEach(func() {
	if CurrentSpecReport().Failed() {
		failed = true
	}
})
var _ = AfterSuite(func() {
	// On failure attach journald output for debugging; otherwise collect
	// the coverage report from every node.
	switch {
	case failed:
		AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, tc.AllNodes()))
	default:
		Expect(e2e.GetCoverageReport(tc.AllNodes())).To(Succeed())
	}
	// Keep the cluster around for inspection only when a non-CI run failed.
	if !failed || *ci {
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(tc.KubeconfigFile)).To(Succeed())
	}
})

View File

@ -170,7 +170,7 @@ var _ = Describe("Verify Create", Ordered, func() {
nodes, _ := tests.ParseNodes(tc.KubeconfigFile)
Eventually(func(g Gomega) {
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
count, err := tests.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
}, "240s", "10s").Should(Succeed())
@ -267,7 +267,7 @@ var _ = Describe("Verify Create", Ordered, func() {
}
}
}
count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
count, err := tests.GetDaemonsetReady("test-daemonset", tc.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pods that are ready does not match node count")
}, "620s", "5s").Should(Succeed())