Convert legacy docker tests from bash to golang (#11357)

* Convert the following Docker tests from Bash to Go
    - basics
    - bootstraptoken
    - cacerts
    - compat -> skew
    - etcd
    - lazypull
    - upgrade

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add Docker go tests to GHA
* Prebuild K3s Go Tests
* Strip go test binaries to reduce size
* Handle complex branch options

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Implement basic golang tests on arm and arm64 pipelines

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2024-11-26 12:30:52 -08:00 committed by GitHub
parent 1b7dd765a5
commit b5e2fa77a6
13 changed files with 1430 additions and 1 deletion


@@ -121,3 +121,85 @@ jobs:
. ./tests/docker/test-helpers
. ./tests/docker/test-run-${{ matrix.dtest }}
echo "Did test-run-${{ matrix.dtest }} pass $?"
build-go-tests:
name: "Build Go Tests"
runs-on: ubuntu-latest
outputs:
branch_name: ${{ steps.branch_step.outputs.BRANCH_NAME }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Go
uses: ./.github/actions/setup-go
- name: Build Go Tests
run: |
mkdir -p ./dist/artifacts
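# Each package under ./tests/docker/... compiles to its own <package>.test binary in the -o directory (multi-package -c requires Go 1.21+)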
go test -c -ldflags="-w -s" -o ./dist/artifacts ./tests/docker/...
- name: Upload Go Tests
uses: actions/upload-artifact@v4
with:
name: docker-go-tests
path: ./dist/artifacts/*.test
compression-level: 9
retention-days: 1
# For upgrade and skew tests, we need to know the branch name this run is based off.
# Since this is predetermined, we can run this step before the docker-go job, saving time.
# For PRs we can use the base_ref (i.e. the target branch of the PR).
# For pushes to k3s-io/k3s, the branch_name is a valid ref, master or release-x.y.
# For pushes to a fork, we need to determine the branch name by finding the parent branch from git show-branch history.
- name: Determine branch name
id: branch_step
run: |
if [ ${{ github.repository }} = "k3s-io/k3s" ]; then
BRANCH_NAME="${{ github.base_ref || github.ref_name }}"
elif [ -z "${{ github.base_ref }}" ]; then
# We are in a fork, and need some git history to determine the branch name
git fetch origin --depth=100 +refs/heads/*:refs/remotes/origin/*
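# Pick the nearest parent branch from the show-branch output, stripping the remote prefix and any ^/~ suffixes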
BRANCH_NAME=$(git show-branch -a 2> /dev/null | grep '\*' | grep -v `git rev-parse --abbrev-ref HEAD` | head -n1 | sed 's/.*\[\(.*\/\)\(.*\)\].*/\2/' | sed 's/[\^~].*//')
else
BRANCH_NAME=${{ github.base_ref }}
fi
echo "Branch Name is $BRANCH_NAME"
echo "BRANCH_NAME=$BRANCH_NAME" >> $GITHUB_OUTPUT
docker-go:
needs: [build, build-go-tests]
name: Docker Tests In GO
runs-on: ubuntu-latest
timeout-minutes: 20
strategy:
fail-fast: false
matrix:
dtest: [basics, bootstraptoken, cacerts, etcd, lazypull, skew, upgrade]
env:
BRANCH_NAME: ${{ needs.build-go-tests.outputs.branch_name }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: "Download K3s image"
uses: actions/download-artifact@v4
with:
name: k3s
path: ./dist/artifacts
- name: Load and set K3s image
run: |
docker image load -i ./dist/artifacts/k3s-image.tar
IMAGE_TAG=$(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep 'rancher/k3s')
echo "K3S_IMAGE=$IMAGE_TAG" >> $GITHUB_ENV
- name: Download Go Tests
uses: actions/download-artifact@v4
with:
name: docker-go-tests
path: ./dist/artifacts
- name: Run ${{ matrix.dtest }} Test
# Put the compiled test binary back next to its test source, since suites load resources via relative paths (e.g. ../resources)
run: |
chmod +x ./dist/artifacts/${{ matrix.dtest }}.test
mv ./dist/artifacts/${{ matrix.dtest }}.test ./tests/docker/${{ matrix.dtest }}/
cd ./tests/docker/${{ matrix.dtest }}
if [ ${{ matrix.dtest }} = "upgrade" ] || [ ${{ matrix.dtest }} = "skew" ]; then
./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE -branch=$BRANCH_NAME
else
./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE
fi

go.mod

@@ -141,6 +141,7 @@ require (
go.etcd.io/etcd/server/v3 v3.5.16
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.27.0
golang.org/x/mod v0.20.0
golang.org/x/net v0.29.0
golang.org/x/sync v0.8.0
golang.org/x/sys v0.25.0
@@ -447,7 +448,6 @@ require (
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/term v0.24.0 // indirect
golang.org/x/text v0.18.0 // indirect


@@ -23,6 +23,10 @@ docker ps
# Only run basic tests on non-amd64 archs; we use GitHub Actions for amd64
if [ "$ARCH" != 'amd64' ]; then
export K3S_IMAGE="rancher/k3s:${VERSION_TAG}${SUFFIX}"
go test ./tests/docker/basics/basics_test.go -k3sImage="$K3S_IMAGE"
echo "Did go test basics $?"
. ./tests/docker/test-run-basics
echo "Did test-run-basics $?"


@@ -0,0 +1,121 @@
package main
import (
"flag"
"fmt"
"os"
"strings"
"testing"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig
func Test_DockerBasic(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Basic Docker Test Suite")
}
var _ = Describe("Basic Tests", Ordered, func() {
Context("Setup Cluster", func() {
It("should provision servers and agents", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(config.ProvisionServers(1)).To(Succeed())
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile)
}, "40s", "5s").Should(Succeed())
})
})
Context("Use Local Storage Volume", func() {
It("should apply local storage volume", func() {
const volumeTestManifest = "../resources/volume-test.yaml"
// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
})
It("should validate local storage volume", func() {
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
})
Context("Verify Binaries and Images", func() {
It("has valid bundled binaries", func() {
for _, server := range config.Servers {
Expect(tester.VerifyValidVersion(server.Name, "kubectl")).To(Succeed())
Expect(tester.VerifyValidVersion(server.Name, "ctr")).To(Succeed())
Expect(tester.VerifyValidVersion(server.Name, "crictl")).To(Succeed())
}
})
It("has valid airgap images", func() {
Expect(config).To(Not(BeNil()))
err := VerifyAirgapImages(config)
Expect(err).NotTo(HaveOccurred())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})
// VerifyAirgapImages checks for changes in the airgap image list
func VerifyAirgapImages(config *tester.TestConfig) error {
// This file is generated during the build packaging step
const airgapImageList = "../../../scripts/airgap/image-list.txt"
// Use a map to automatically handle duplicates
imageSet := make(map[string]struct{})
// Collect all images from nodes
for _, node := range config.GetNodeNames() {
cmd := fmt.Sprintf("docker exec %s crictl images -o json | jq -r '.images[].repoTags[0] | select(. != null)'", node)
output, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to execute crictl and jq: %v", err)
for _, line := range strings.Split(strings.TrimSpace(string(output)), "\n") {
if line != "" {
imageSet[line] = struct{}{}
}
}
}
// Convert map keys to slice
uniqueImages := make([]string, 0, len(imageSet))
for image := range imageSet {
uniqueImages = append(uniqueImages, image)
}
existing, err := os.ReadFile(airgapImageList)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to read airgap list file: %v", err)
}
// Sorting doesn't matter with ConsistOf
existingImages := strings.Split(strings.TrimSpace(string(existing)), "\n")
Expect(existingImages).To(ConsistOf(uniqueImages))
return nil
}


@@ -0,0 +1,67 @@
package main
import (
"flag"
"strings"
"testing"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig
func Test_DockerBootstrapToken(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "BoostrapToken Docker Test Suite")
}
var _ = Describe("Boostrap Token Tests", Ordered, func() {
Context("Setup Cluster", func() {
It("should provision servers", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(config.ProvisionServers(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
})
Context("Add Agent with Bootstrap token", func() {
var newSecret string
It("creates a bootstrap token", func() {
var err error
newSecret, err = tester.RunCmdOnDocker(config.Servers[0].Name, "k3s token create --ttl=5m --description=Test")
Expect(err).NotTo(HaveOccurred())
Expect(newSecret).NotTo(BeEmpty())
})
It("joins the agent with the new tokens", func() {
newSecret = strings.ReplaceAll(newSecret, "\n", "")
config.Secret = newSecret
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func(g Gomega) {
nodes, err := tester.ParseNodes(config.KubeconfigFile)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(nodes).To(HaveLen(2))
g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
}, "40s", "5s").Should(Succeed())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})


@@ -0,0 +1,103 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig
var testID string
func Test_DockerCACerts(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "CA Certs Docker Test Suite")
}
var _ = Describe("CA Certs Tests", Ordered, func() {
Context("Setup Cluster", func() {
// TODO determine if the below is still true
// This test runs in docker mounting the docker socket,
// so we can't directly mount files into the test containers. Instead we have to
// run a dummy container with a volume, copy files into that volume, and then
// share it with the other containers that need the file.
It("should configure CA certs", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(os.MkdirAll(filepath.Join(config.TestDir, "pause"), 0755)).To(Succeed())
testID = filepath.Base(config.TestDir)
pauseName := fmt.Sprintf("k3s-pause-%s", strings.ToLower(testID))
tlsMount := fmt.Sprintf("--mount type=volume,src=%s,dst=/var/lib/rancher/k3s/server/tls", pauseName)
cmd := fmt.Sprintf("docker run -d --name %s --hostname %s %s rancher/mirrored-pause:3.6",
pauseName, pauseName, tlsMount)
_, err = tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
dataDir := filepath.Join(config.TestDir, "pause/k3s")
cmd = fmt.Sprintf("DATA_DIR=%s ../../../contrib/util/generate-custom-ca-certs.sh", dataDir)
_, err = tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker cp %s %s:/var/lib/rancher", dataDir, pauseName)
_, err = tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
// Set SERVER_ARGS to include the custom CA certs
os.Setenv("SERVER_DOCKER_ARGS", tlsMount)
})
It("should provision servers and agents", func() {
Expect(config.ProvisionServers(1)).To(Succeed())
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
})
Context("Verify Custom CA Certs", func() {
It("should have custom CA certs", func() {
// Check that the custom CA cert files are present in each server container
for _, server := range config.Servers {
cmd := fmt.Sprintf("docker exec %s ls /var/lib/rancher/k3s/server/tls", server.Name)
output, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to list custom CA certs: %v", err)
Expect(output).To(ContainSubstring("ca.crt"))
}
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
cmd := fmt.Sprintf("docker stop k3s-pause-%s", testID)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm k3s-pause-%s", testID)
_, err = tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker volume ls -q | grep -F %s | xargs -r docker volume rm -f", testID)
_, err = tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
}
})


@@ -0,0 +1,79 @@
package main
import (
"flag"
"os"
"testing"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig
func Test_DockerEtcd(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Etcd Docker Test Suite")
}
var _ = Describe("Etcd Tests", Ordered, func() {
Context("Test a 3 server cluster", func() {
It("should setup the cluster configuration", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
})
It("should provision servers", func() {
Expect(config.ProvisionServers(3)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3))
g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
}, "60s", "5s").Should(Succeed())
})
It("should destroy the cluster", func() {
Expect(config.Cleanup()).To(Succeed())
})
})
Context("Test a Split Role cluster with 3 etcd, 2 control-plane, 1 agents", func() {
It("should setup the cluster configuration", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(os.Setenv("SERVER_0_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler --cluster-init")).To(Succeed())
Expect(os.Setenv("SERVER_1_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler")).To(Succeed())
Expect(os.Setenv("SERVER_2_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler")).To(Succeed())
Expect(os.Setenv("SERVER_3_ARGS", "--disable-etcd")).To(Succeed())
Expect(os.Setenv("SERVER_4_ARGS", "--disable-etcd")).To(Succeed())
})
It("should provision servers and agents", func() {
Expect(config.ProvisionServers(5)).To(Succeed())
Expect(config.ProvisionAgents(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "90s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(6))
g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
}, "60s", "5s").Should(Succeed())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})


@@ -0,0 +1,136 @@
package main
import (
"flag"
"fmt"
"os"
"strings"
"testing"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig
func Test_DockerLazyPull(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "LazyPull Docker Test Suite")
}
var _ = Describe("LazyPull Tests", Ordered, func() {
Context("Setup Cluster", func() {
It("should provision servers", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(os.Setenv("SERVER_ARGS", "--snapshotter=stargz")).To(Succeed())
Expect(config.ProvisionServers(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile)
}, "40s", "5s").Should(Succeed())
})
})
Context("Use Snapshot Container", func() {
It("should apply local storage volume", func() {
const snapshotTestManifest = "../resources/snapshot-test.yaml"
// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", snapshotTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
})
It("should have the pod come up", func() {
Eventually(func() (bool, error) {
return tester.PodReady("stargz-snapshot-test", "default", config.KubeconfigFile)
}, "30s", "5s").Should(BeTrue())
})
var topLayer string
It("extracts the topmost layer of the container", func() {
Eventually(func() (string, error) {
var err error
topLayer, err = getTopmostLayer(config.Servers[0].Name, "stargz-snapshot-test")
topLayer = strings.TrimSpace(topLayer)
return topLayer, err
}, "30s", "5s").ShouldNot(BeEmpty())
fmt.Println("Topmost layer: ", topLayer)
})
It("checks all layers are remote snapshots", func() {
Expect(lookLayers(config.Servers[0].Name, topLayer)).To(Succeed())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})
func lookLayers(node, layer string) error {
remoteSnapshotLabel := "containerd.io/snapshot/remote"
layersNum := 0
var err error
for layersNum = 0; layersNum < 100; layersNum++ {
// We use RunCommand instead of RunCmdOnDocker because we pipe the output to jq
cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshot --snapshotter=stargz info %s | jq -r '.Parent'", node, layer)
layer, err = tester.RunCommand(cmd)
if err != nil {
return fmt.Errorf("failed to get parent layer: %v", err)
}
layer = strings.TrimSpace(layer)
// If the layer is null, we have reached the topmost layer
if layer == "null" {
break
}
cmd = fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshots --snapshotter=stargz info %s | jq -r '.Labels.\"%s\"'", node, layer, remoteSnapshotLabel)
label, err := tester.RunCommand(cmd)
if err != nil {
return fmt.Errorf("failed to get layer label: %v", err)
}
label = strings.TrimSpace(label)
fmt.Printf("Checking layer %s : %s\n", layer, label)
if label == "null" {
return fmt.Errorf("layer %s isn't remote snapshot", layer)
}
}
if layersNum == 0 {
return fmt.Errorf("cannot get layers")
} else if layersNum >= 100 {
return fmt.Errorf("testing image contains too many layers > 100")
}
return nil
}
func getTopmostLayer(node, container string) (string, error) {
var targetContainer string
cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c ls -q labels.\"io.kubernetes.container.name\"==\"%s\" | sed -n 1p", node, container)
targetContainer, _ = tester.RunCommand(cmd)
targetContainer = strings.TrimSpace(targetContainer)
fmt.Println("targetContainer: ", targetContainer)
if targetContainer == "" {
return "", fmt.Errorf("failed to get target container")
}
cmd = fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c info %s | jq -r '.SnapshotKey'", node, targetContainer)
layer, err := tester.RunCommand(cmd)
if err != nil {
return "", fmt.Errorf("failed to get topmost layer: %v", err)
}
return strings.TrimSpace(layer), nil
}


@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
name: stargz-snapshot-test
spec:
containers:
- name: stargz-snapshot-test
image: "ghcr.io/stargz-containers/k3s-test-ubuntu:20.04-esgz"
command: ["sleep"]
args: ["infinity"]


@@ -0,0 +1,30 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: local-path-pvc
namespace: kube-system
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-path
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
name: volume-test
namespace: kube-system
spec:
containers:
- name: volume-test
image: rancher/mirrored-pause:3.6
imagePullPolicy: IfNotPresent
volumeMounts:
- name: volv
mountPath: /data
volumes:
- name: volv
persistentVolumeClaim:
claimName: local-path-pvc


@@ -0,0 +1,146 @@
package main
import (
"flag"
"fmt"
"strings"
"testing"
"github.com/blang/semver/v4"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Using these two flags, we test the version skew between the latest release
// of <branch> and the current commit build of K3s defined by <k3sImage>
var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
var branch = flag.String("branch", "master", "The release branch to test")
var config *tester.TestConfig
var numServers = 1
var numAgents = 1
func Test_DockerSkew(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Skew Docker Test Suite")
}
var lastMinorVersion string
var _ = BeforeSuite(func() {
// If this test runs on v1.31 commit, we want the latest v1.30 release
// For master and unreleased branches, we want the latest stable release
var upgradeChannel string
var err error
if *branch == "master" {
upgradeChannel = "stable"
} else {
upgradeChannel = strings.Replace(*branch, "release-", "v", 1)
// now that it is in v1.Y format, subtract one from the minor version
// to get the previous release channel.
// ParseTolerant handles the leading "v" and the missing patch number
sV, err := semver.ParseTolerant(upgradeChannel)
Expect(err).NotTo(HaveOccurred())
sV.Minor--
upgradeChannel = fmt.Sprintf("v%d.%d", sV.Major, sV.Minor)
}
lastMinorVersion, err = tester.GetVersionFromChannel(upgradeChannel)
Expect(err).NotTo(HaveOccurred())
Expect(lastMinorVersion).To(ContainSubstring("v1."))
fmt.Println("Using last minor version: ", lastMinorVersion)
})
var _ = Describe("Skew Tests", Ordered, func() {
Context("Setup Cluster with Server newer than Agent", func() {
It("should provision new servers and old agents", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(config.ProvisionServers(numServers)).To(Succeed())
config.K3sImage = "rancher/k3s:" + lastMinorVersion
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should match respective versions", func() {
for _, server := range config.Servers {
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
// The k3s image is in the format rancher/k3s:v1.20.0-k3s1
cVersion := strings.Split(*k3sImage, ":")[1]
cVersion = strings.Replace(cVersion, "-amd64", "", 1)
cVersion = strings.Replace(cVersion, "-", "+", 1)
Expect(out).To(ContainSubstring(cVersion))
}
for _, agent := range config.Agents {
Expect(tester.RunCmdOnDocker(agent.Name, "k3s --version")).
To(ContainSubstring(strings.Replace(lastMinorVersion, "-", "+", 1)))
}
})
It("should deploy a test pod", func() {
const volumeTestManifest = "../resources/volume-test.yaml"
// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("should destroy the cluster", func() {
Expect(config.Cleanup()).To(Succeed())
})
})
Context("Test cluster with 1 Server older and 2 Servers newer", func() {
It("should setup the cluster configuration", func() {
var err error
config, err = tester.NewTestConfig("rancher/k3s:" + lastMinorVersion)
Expect(err).NotTo(HaveOccurred())
})
It("should provision servers", func() {
Expect(config.ProvisionServers(1)).To(Succeed())
config.K3sImage = *k3sImage
Expect(config.ProvisionServers(3)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3))
g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
}, "60s", "5s").Should(Succeed())
})
It("should match respective versions", func() {
out, err := tester.RunCmdOnDocker(config.Servers[0].Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
Expect(out).To(ContainSubstring(strings.Replace(lastMinorVersion, "-", "+", 1)))
for _, server := range config.Servers[1:] {
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
// The k3s image is in the format rancher/k3s:v1.20.0-k3s1-amd64
cVersion := strings.Split(*k3sImage, ":")[1]
cVersion = strings.Replace(cVersion, "-amd64", "", 1)
cVersion = strings.Replace(cVersion, "-", "+", 1)
Expect(out).To(ContainSubstring(cVersion))
}
})
It("should destroy the cluster", func() {
Expect(config.Cleanup()).To(Succeed())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})
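The image-tag-to-version normalization above recurs in several specs: an image like rancher/k3s:v1.31.2-k3s1-amd64 reports itself as v1.31.2+k3s1 in k3s --version. A minimal standalone sketch of that mapping, with a hypothetical helper name and example tag, not part of this change:

package main

import (
	"fmt"
	"strings"
)

// imageToVersion converts a K3s image tag to the version string reported
// by `k3s --version`: it drops the arch suffix and swaps the first
// remaining "-" for "+".
func imageToVersion(image string) string {
	version := strings.Split(image, ":")[1]
	version = strings.Replace(version, "-amd64", "", 1)
	return strings.Replace(version, "-", "+", 1)
}

func main() {
	// Example tag is hypothetical
	fmt.Println(imageToVersion("rancher/k3s:v1.31.2-k3s1-amd64")) // v1.31.2+k3s1
}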


@@ -0,0 +1,491 @@
package docker
import (
"bytes"
"context"
"fmt"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/mod/semver"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
type TestConfig struct {
TestDir string
KubeconfigFile string
Label string
Secret string
K3sImage string
NumServers int
NumAgents int
Servers []ServerConfig
Agents []AgentConfig
}
type ServerConfig struct {
Name string
Port int
IP string
URL string
}
type AgentConfig struct {
Name string
IP string
}
// NewTestConfig initializes the test environment and returns the configuration
// k3s version and tag information is extracted from the version.sh script
// and supplied as an argument to the function/test
func NewTestConfig(k3sImage string) (*TestConfig, error) {
config := &TestConfig{
K3sImage: k3sImage,
}
// Create temporary directory
tempDir, err := os.MkdirTemp("", "k3s-test-")
if err != nil {
return nil, fmt.Errorf("failed to create temp directory: %v", err)
}
config.TestDir = tempDir
// Setup cleanup on exit
// setupCleanup(config)
// Create required directories
if err := os.MkdirAll(filepath.Join(config.TestDir, "logs"), 0755); err != nil {
return nil, fmt.Errorf("failed to create logs directory: %v", err)
}
// Generate random secret
config.Secret = fmt.Sprintf("%012d", rand.Int63n(1000000000000))
return config, nil
}
// portFree checks if a port is in use and returns true if it is free
func portFree(port int) bool {
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
return false
}
listener.Close()
return true
}
// getPort finds an available port
func getPort() int {
var port int
for i := 0; i < 100; i++ {
port = 10000 + rand.Intn(50000)
if portFree(port) {
return port
}
}
return -1
}
// ProvisionServers starts the required number of k3s servers
// and updates the kubeconfig file with the first control-plane server's details
func (config *TestConfig) ProvisionServers(numOfServers int) error {
config.NumServers = numOfServers
for i := 0; i < config.NumServers; i++ {
// If server i already exists, skip it. This is useful for scenarios where
// the first server is started separately from the rest of the servers
if config.Servers != nil && i < len(config.Servers) {
continue
}
testID := filepath.Base(config.TestDir)
name := fmt.Sprintf("k3s-server-%d-%s", i, strings.ToLower(testID))
port := getPort()
if port == -1 {
return fmt.Errorf("failed to find an available port")
}
serverImage := getEnvOrDefault("K3S_IMAGE_SERVER", config.K3sImage)
var joinOrStart string
if i == 0 {
joinOrStart = "--cluster-init"
} else {
if config.Servers[0].URL == "" {
return fmt.Errorf("first server URL is empty")
}
joinOrStart = fmt.Sprintf("--server %s", config.Servers[0].URL)
}
// Assemble all the Docker args
dRun := strings.Join([]string{"docker run -d",
"--name", name,
"--hostname", name,
"--privileged",
"-p", fmt.Sprintf("127.0.0.1:%d:6443", port),
"-p", "6443",
"-e", fmt.Sprintf("K3S_TOKEN=%s", config.Secret),
"-e", "K3S_DEBUG=true",
os.Getenv("SERVER_DOCKER_ARGS"),
os.Getenv(fmt.Sprintf("SERVER_%d_DOCKER_ARGS", i)),
os.Getenv("REGISTRY_CLUSTER_ARGS"),
serverImage,
"server", joinOrStart, os.Getenv("SERVER_ARGS"), os.Getenv(fmt.Sprintf("SERVER_%d_ARGS", i))}, " ")
if out, err := RunCommand(dRun); err != nil {
return fmt.Errorf("failed to run server container: %s: %v", out, err)
}
// Get the IP address of the container
ipOutput, err := RunCommand("docker inspect --format \"{{ .NetworkSettings.IPAddress }}\" " + name)
if err != nil {
return err
}
ip := strings.TrimSpace(ipOutput)
url := fmt.Sprintf("https://%s:6443", ip)
config.Servers = append(config.Servers, ServerConfig{
Name: name,
Port: port,
IP: ip,
URL: url,
})
fmt.Printf("Started %s @ %s\n", name, url)
// Sleep for a bit to allow the first server to start
if i == 0 && numOfServers > 1 {
time.Sleep(10 * time.Second)
}
}
// Wait for kubeconfig to be available
time.Sleep(5 * time.Second)
return copyAndModifyKubeconfig(config)
}
func (config *TestConfig) ProvisionAgents(numOfAgents int) error {
config.NumAgents = numOfAgents
if err := checkVersionSkew(config); err != nil {
return err
}
testID := filepath.Base(config.TestDir)
k3sURL := getEnvOrDefault("K3S_URL", config.Servers[0].URL)
var g errgroup.Group
// Protects config.Agents, which the goroutines below append to concurrently
var mu sync.Mutex
for i := 0; i < config.NumAgents; i++ {
i := i // capture loop variable
g.Go(func() error {
name := fmt.Sprintf("k3s-agent-%d-%s", i, strings.ToLower(testID))
agentInstanceArgs := fmt.Sprintf("AGENT_%d_ARGS", i)
// Assemble all the Docker args
dRun := strings.Join([]string{"docker run -d",
"--name", name,
"--hostname", name,
"--privileged",
"-e", fmt.Sprintf("K3S_TOKEN=%s", config.Secret),
"-e", fmt.Sprintf("K3S_URL=%s", k3sURL),
os.Getenv("AGENT_DOCKER_ARGS"),
os.Getenv(fmt.Sprintf("AGENT_%d_DOCKER_ARGS", i)),
os.Getenv("REGISTRY_CLUSTER_ARGS"),
getEnvOrDefault("K3S_IMAGE_AGENT", config.K3sImage),
"agent", os.Getenv("ARGS"), os.Getenv("AGENT_ARGS"), os.Getenv(agentInstanceArgs)}, " ")
if out, err := RunCommand(dRun); err != nil {
return fmt.Errorf("failed to run agent container: %s: %v", out, err)
}
// Get the IP address of the container
ipOutput, err := RunCommand("docker inspect --format \"{{ .NetworkSettings.IPAddress }}\" " + name)
if err != nil {
return err
}
ip := strings.TrimSpace(ipOutput)
mu.Lock()
config.Agents = append(config.Agents, AgentConfig{
Name: name,
IP: ip,
})
mu.Unlock()
fmt.Printf("Started %s\n", name)
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
return nil
}
func (config *TestConfig) RemoveNode(nodeName string) error {
cmd := fmt.Sprintf("docker stop %s", nodeName)
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed to stop node %s: %v", nodeName, err)
}
cmd = fmt.Sprintf("docker rm %s", nodeName)
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed to remove node %s: %v", nodeName, err)
}
return nil
}
func (config *TestConfig) GetNodeNames() []string {
var nodeNames []string
for _, server := range config.Servers {
nodeNames = append(nodeNames, server.Name)
}
for _, agent := range config.Agents {
nodeNames = append(nodeNames, agent.Name)
}
return nodeNames
}
func (config *TestConfig) Cleanup() error {
errs := make([]error, 0)
// Stop and remove all servers
for _, server := range config.Servers {
if err := config.RemoveNode(server.Name); err != nil {
errs = append(errs, err)
}
}
// Stop and remove all agents
for _, agent := range config.Agents {
if err := config.RemoveNode(agent.Name); err != nil {
errs = append(errs, err)
}
}
// Error out if we hit any issues
if len(errs) > 0 {
return fmt.Errorf("cleanup failed: %v", errs)
}
if config.TestDir != "" {
return os.RemoveAll(config.TestDir)
}
config.Agents = nil
config.Servers = nil
return nil
}
// copyAndModifyKubeconfig copies out kubeconfig from first control-plane server
// and updates the port to match the external port
func copyAndModifyKubeconfig(config *TestConfig) error {
if len(config.Servers) == 0 {
return fmt.Errorf("no servers available to copy kubeconfig")
}
serverID := 0
for i := range config.Servers {
serverArgs := os.Getenv(fmt.Sprintf("SERVER_%d_ARGS", i))
if !strings.Contains(serverArgs, "--disable-apiserver") {
serverID = i
break
}
}
cmd := fmt.Sprintf("docker cp %s:/etc/rancher/k3s/k3s.yaml %s/kubeconfig.yaml", config.Servers[serverID].Name, config.TestDir)
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed to copy kubeconfig: %v", err)
}
cmd = fmt.Sprintf("sed -i -e \"s/:6443/:%d/g\" %s/kubeconfig.yaml", config.Servers[serverID].Port, config.TestDir)
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed to update kubeconfig: %v", err)
}
config.KubeconfigFile = filepath.Join(config.TestDir, "kubeconfig.yaml")
fmt.Println("Kubeconfig file: ", config.KubeconfigFile)
return nil
}
// RunCmdOnDocker runs a command on a docker container
func RunCmdOnDocker(container, cmd string) (string, error) {
dCmd := fmt.Sprintf("docker exec %s %s", container, cmd)
return RunCommand(dCmd)
}
// RunCommand runs a command on the host.
// Returns stdout and embeds stderr inside the error message.
func RunCommand(cmd string) (string, error) {
var stdout, stderr bytes.Buffer
c := exec.Command("bash", "-c", cmd)
c.Stdout = &stdout
c.Stderr = &stderr
err := c.Run()
if err != nil {
return stdout.String(), fmt.Errorf("failed to run command: %s: %s: %v", cmd, stderr.String(), err)
}
return stdout.String(), nil
}
func checkVersionSkew(config *TestConfig) error {
if config.NumAgents > 0 {
serverImage := getEnvOrDefault("K3S_IMAGE_SERVER", config.K3sImage)
agentImage := getEnvOrDefault("K3S_IMAGE_AGENT", config.K3sImage)
// Compare the version tags, not the full image names,
// since semver.MajorMinor expects a string starting with "vMAJOR.MINOR"
serverVersion := serverImage[strings.LastIndex(serverImage, ":")+1:]
agentVersion := agentImage[strings.LastIndex(agentImage, ":")+1:]
if semver.Compare(semver.MajorMinor(agentVersion), semver.MajorMinor(serverVersion)) > 0 {
return fmt.Errorf("agent version cannot be higher than server - not supported by Kubernetes version skew policy")
}
}
return nil
}
func getEnvOrDefault(key, defaultValue string) string {
if value := os.Getenv(key); value != "" {
return value
}
return defaultValue
}
// VerifyValidVersion checks for invalid version strings
func VerifyValidVersion(container string, binary string) error {
output, err := RunCmdOnDocker(container, binary+" version")
if err != nil {
return err
}
lines := strings.Split(output, "\n")
// Check for invalid version strings
re := regexp.MustCompile(`(?i).*(dev|head|unknown|fail|refuse|\+[^"]*\.).*`)
for _, line := range lines {
if re.MatchString(line) {
return fmt.Errorf("invalid version string found in %s: %s", binary, line)
}
}
return nil
}
// GetVersionFromChannel returns the latest version from the update channel
func GetVersionFromChannel(upgradeChannel string) (string, error) {
url := fmt.Sprintf("https://update.k3s.io/v1-release/channels/%s", upgradeChannel)
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
resp, err := client.Get(url)
if err != nil {
return "", fmt.Errorf("failed to get URL: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusFound {
return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
finalURL := resp.Header.Get("Location")
if finalURL == "" {
return "", fmt.Errorf("location header not set")
}
version := finalURL[strings.LastIndex(finalURL, "/")+1:]
version = strings.Replace(version, "+", "-", 1)
return version, nil
}
// TODO: the below functions are duplicated in the integration test utils. Consider combining into a common package
// DeploymentsReady checks if the provided list of deployments are ready, otherwise returns an error
func DeploymentsReady(deployments []string, kubeconfigFile string) error {
deploymentSet := make(map[string]bool)
for _, d := range deployments {
deploymentSet[d] = false
}
client, err := k8sClient(kubeconfigFile)
if err != nil {
return err
}
deploymentList, err := client.AppsV1().Deployments("").List(context.Background(), metav1.ListOptions{})
if err != nil {
return err
}
for _, deployment := range deploymentList.Items {
if _, ok := deploymentSet[deployment.Name]; ok && deployment.Status.ReadyReplicas == deployment.Status.Replicas {
deploymentSet[deployment.Name] = true
}
}
for d, found := range deploymentSet {
if !found {
return fmt.Errorf("failed to deploy %s", d)
}
}
return nil
}
func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) {
clientSet, err := k8sClient(kubeconfigFile)
if err != nil {
return nil, err
}
nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
return nodes.Items, nil
}
// PodReady checks if a pod is ready by querying its status
func PodReady(podName, namespace, kubeconfigFile string) (bool, error) {
clientSet, err := k8sClient(kubeconfigFile)
if err != nil {
return false, err
}
pod, err := clientSet.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod: %v", err)
}
// Check that the container matching the pod name reports ready
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Name == podName && containerStatus.Ready {
return true, nil
}
}
return false, nil
}
// NodesReady checks that all nodes are ready, otherwise returns an error
func NodesReady(kubeconfigFile string) error {
nodes, err := ParseNodes(kubeconfigFile)
if err != nil {
return err
}
for _, node := range nodes {
for _, condition := range node.Status.Conditions {
if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue {
return fmt.Errorf("node %s is not ready", node.Name)
}
}
}
return nil
}
func k8sClient(kubeconfigFile string) (*kubernetes.Clientset, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile)
if err != nil {
return nil, err
}
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientSet, nil
}
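For orientation, a minimal sketch of the lifecycle the suites above drive through this package, with the Ginkgo wiring elided, the Eventually polling reduced to a single call, and a placeholder image tag:

package main

import (
	"log"

	tester "github.com/k3s-io/k3s/tests/docker"
)

func main() {
	// Placeholder tag; the suites receive the real image via the -k3sImage flag
	config, err := tester.NewTestConfig("rancher/k3s:v1.31.2-k3s1")
	if err != nil {
		log.Fatal(err)
	}
	defer config.Cleanup()
	if err := config.ProvisionServers(1); err != nil {
		log.Fatal(err)
	}
	if err := config.ProvisionAgents(1); err != nil {
		log.Fatal(err)
	}
	// The suites wrap this in Eventually to poll until the packaged add-ons are ready
	if err := tester.DeploymentsReady([]string{"coredns", "traefik"}, config.KubeconfigFile); err != nil {
		log.Println("deployments not yet ready:", err)
	}
}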


@@ -0,0 +1,160 @@
package main
import (
"flag"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Using these two flags, we upgrade from the latest release of <branch> to
// the current commit build of K3s defined by <k3sImage>
var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
var branch = flag.String("branch", "master", "The release branch to test")
var config *tester.TestConfig
var numServers = 1
var numAgents = 1
func Test_DockerUpgrade(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Upgrade Docker Test Suite")
}
var _ = Describe("Upgrade Tests", Ordered, func() {
Context("Setup Cluster with Lastest Release", func() {
var latestVersion string
It("should determine latest branch version", func() {
var upgradeChannel string
var err error
if *branch == "master" {
upgradeChannel = "latest"
} else {
upgradeChannel = strings.Replace(*branch, "release-", "v", 1)
url := fmt.Sprintf("https://update.k3s.io/v1-release/channels/%s", upgradeChannel)
resp, err := http.Head(url)
// Cover the case where the branch does not exist yet,
// such as a new unreleased minor version
if err != nil || resp.StatusCode != http.StatusOK {
upgradeChannel = "latest"
}
}
latestVersion, err = tester.GetVersionFromChannel(upgradeChannel)
Expect(err).NotTo(HaveOccurred())
Expect(latestVersion).To(ContainSubstring("v1."))
fmt.Println("Using latest version: ", latestVersion)
})
It("should setup environment", func() {
var err error
config, err = tester.NewTestConfig("rancher/k3s:" + latestVersion)
Expect(err).NotTo(HaveOccurred())
testID := filepath.Base(config.TestDir)
for i := 0; i < numServers; i++ {
m1 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID)
m2 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-log,dst=/var/log", i, testID)
m3 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-etc,dst=/etc/rancher", i, testID)
Expect(os.Setenv(fmt.Sprintf("SERVER_%d_DOCKER_ARGS", i), fmt.Sprintf("%s %s %s", m1, m2, m3))).To(Succeed())
}
for i := 0; i < numAgents; i++ {
m1 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID)
m2 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-log,dst=/var/log", i, testID)
m3 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-etc,dst=/etc/rancher", i, testID)
Expect(os.Setenv(fmt.Sprintf("AGENT_%d_DOCKER_ARGS", i), fmt.Sprintf("%s %s %s", m1, m2, m3))).To(Succeed())
}
})
It("should provision servers and agents", func() {
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should confirm latest version", func() {
for _, server := range config.Servers {
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
Expect(out).To(ContainSubstring(strings.Replace(latestVersion, "-", "+", 1)))
}
})
It("should deploy a test pod", func() {
const volumeTestManifest = "../resources/volume-test.yaml"
// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("should upgrade to current commit build", func() {
By("Remove old servers and agents")
for _, server := range config.Servers {
cmd := fmt.Sprintf("docker stop %s", server.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm %s", server.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
fmt.Printf("Stopped %s\n", server.Name)
}
config.Servers = nil
for _, agent := range config.Agents {
cmd := fmt.Sprintf("docker stop %s", agent.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm %s", agent.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
}
config.Agents = nil
config.K3sImage = *k3sImage
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should confirm commit version", func() {
for _, server := range config.Servers {
Expect(tester.VerifyValidVersion(server.Name, "kubectl")).To(Succeed())
Expect(tester.VerifyValidVersion(server.Name, "ctr")).To(Succeed())
Expect(tester.VerifyValidVersion(server.Name, "crictl")).To(Succeed())
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
cVersion := strings.Split(*k3sImage, ":")[1]
cVersion = strings.Replace(cVersion, "-amd64", "", 1)
cVersion = strings.Replace(cVersion, "-", "+", 1)
Expect(out).To(ContainSubstring(cVersion))
}
})
It("should confirm test pod is still Running", func() {
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})