Remove unused docker tests

Signed-off-by: Derek Nola <derek.nola@suse.com>
pull/12125/head
Derek Nola 2025-02-06 11:51:44 -08:00
parent c652b1b26f
commit 49a5481e6c
12 changed files with 0 additions and 893 deletions


@@ -1,18 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    apiVersion: pod-security.admission.config.k8s.io/v1beta1
    kind: PodSecurityConfiguration
    defaults:
      enforce: "privileged"
      enforce-version: "latest"
      audit: "baseline"
      audit-version: "latest"
      warn: "baseline"
      warn-version: "latest"
    exemptions:
      usernames: []
      runtimeClasses: []
      namespaces: [kube-system]
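
This admission configuration is inert on its own; the hardened test later in this diff hands it to the apiserver via a kube-apiserver flag. A minimal sketch of the wiring, using the path that test copies the file to:

    k3s server \
        --kube-apiserver-arg=admission-control-config-file=/opt/rancher/k3s/cluster-level-pss.yaml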


@@ -1,128 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example
  namespace: default
  labels:
    app.kubernetes.io: example
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: example
  template:
    metadata:
      labels:
        app.kubernetes.io/name: example
    spec:
      automountServiceAccountToken: false
      securityContext:
        runAsUser: 405
        runAsGroup: 100
      containers:
      - name: socat
        image: docker.io/alpine/socat:1.7.4.3-r1
        args:
        - "TCP-LISTEN:8080,reuseaddr,fork"
        - "EXEC:echo -e 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n$(NODE_IP) $(POD_NAMESPACE)/$(POD_NAME)\r\n'"
        ports:
        - containerPort: 8080
          name: http
        env:
        - name: NODE_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        readinessProbe:
          initialDelaySeconds: 2
          periodSeconds: 10
          httpGet:
            path: /
            port: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: example
  namespace: default
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: example
  ports:
  - name: http
    protocol: TCP
    port: 80
    nodePort: 30096
    targetPort: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example
spec:
  rules:
  - host: "example.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: example
            port:
              name: http
---
# Allow access to example backend from traefik ingress
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ingress-to-backend-example
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: example
  ingress:
  - ports:
    - port: 8080
      protocol: TCP
  - from:
    - namespaceSelector:
        matchLabels:
          kubernetes.io/metadata.name: kube-system
      podSelector:
        matchLabels:
          app.kubernetes.io/name: traefik
  policyTypes:
  - Ingress
---
# Allow access to example backend from outside the cluster via nodeport service
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: nodeport-to-backend-example
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: example
  ingress:
  - ports:
    - port: 8080
      protocol: TCP
  - from:
    - ipBlock:
        cidr: 0.0.0.0/0
        except:
        - 10.42.0.0/16
        - 10.43.0.0/16
  policyTypes:
  - Ingress
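
Once these manifests are applied, every node should answer both through the traefik ingress and on the nodeport. A minimal spot-check, assuming a node reachable at 172.17.0.2 (the test's verify-ingress and verify-nodeport helpers, later in this diff, loop the same curls over all node IPs):

    curl -ksf -H 'Host: example.com' https://172.17.0.2/
    curl -sf -H 'Host: example.com' http://172.17.0.2:30096/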


@@ -1,120 +0,0 @@
---
# Allow all traffic within the kube-system namespace; block all other access
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: intra-namespace
  namespace: kube-system
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: kube-system
  policyTypes:
  - Ingress
---
# Allow all traffic within the default namespace; block all other access
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: intra-namespace
  namespace: default
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: default
  policyTypes:
  - Ingress
---
# Allow traffic within the kube-public namespace; block all other access
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: intra-namespace
  namespace: kube-public
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: kube-public
  policyTypes:
  - Ingress
---
# Allow all access to metrics-server
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-all-metrics-server
  namespace: kube-system
spec:
  podSelector:
    matchLabels:
      k8s-app: metrics-server
  ingress:
  - {}
  policyTypes:
  - Ingress
---
# Allow all access to coredns DNS ports
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-network-dns-policy
  namespace: kube-system
spec:
  ingress:
  - ports:
    - port: 53
      protocol: TCP
    - port: 53
      protocol: UDP
  podSelector:
    matchLabels:
      k8s-app: kube-dns
  policyTypes:
  - Ingress
---
# Allow all access to the servicelb traefik HTTP/HTTPS ports
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-all-svclb-traefik
  namespace: kube-system
spec:
  podSelector:
    matchLabels:
      svccontroller.k3s.cattle.io/svcname: traefik
  ingress:
  - ports:
    - port: 80
      protocol: TCP
    - port: 443
      protocol: TCP
  policyTypes:
  - Ingress
---
# Allow all access to traefik HTTP/HTTPS ports
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-all-traefik
  namespace: kube-system
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: traefik
  ingress:
  - ports:
    - port: 8000
      protocol: TCP
    - port: 8443
      protocol: TCP
  policyTypes:
  - Ingress


@@ -50,10 +50,6 @@ if [ "$ARCH" == 'arm' ] || [ "$DRONE_BUILD_EVENT" = 'tag' ]; then
fi
#TODO convert this to new go test framework
. ./tests/docker/test-run-hardened
echo "Did test-run-hardened $?"
# ---
[ "$ARCH" != 'amd64' ] && \


@@ -1,50 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"

start-test() {
    use-local-storage-volume
    docker exec $(cat $TEST_DIR/servers/1/metadata/name) check-config || true
    verify-valid-versions $(cat $TEST_DIR/servers/1/metadata/name)
    verify-airgap-images $(cat $TEST_DIR/{servers,agents}/*/metadata/name)
}
export -f start-test

# -- check for changes to the airgap image list
verify-airgap-images() {
    local airgap_image_list='scripts/airgap/image-list.txt'
    for name in $@; do
        docker exec $name crictl images -o json \
            | jq -r '.images[].repoTags[0] | select(. != null)'
    done | sort -u >$airgap_image_list.tmp
    if ! diff $airgap_image_list{,.tmp}; then
        echo '[ERROR] Failed airgap image check'
        return 1
    fi
}
export -f verify-airgap-images

# -- create a pod that uses local-storage to ensure that the local-path-provisioner
# -- helper image gets used
use-local-storage-volume() {
    local volume_test_manifest='scripts/airgap/volume-test.yaml'
    kubectl apply -f $volume_test_manifest
    wait-for-services volume-test
}
export -f use-local-storage-volume

# --- create a basic cluster and check for valid versions
LABEL=BASICS run-test

cleanup-test-env
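
When the image check fails, the freshly generated list is left beside the checked-in one. Assuming the drift is intentional, refreshing the committed list is a one-liner:

    mv scripts/airgap/image-list.txt{.tmp,}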


@@ -1,29 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"

agent-pre-hook() {
    timeout --foreground 2m bash -c "wait-for-nodes $(( NUM_SERVERS ))"
    local server=$(cat $TEST_DIR/servers/1/metadata/name)
    docker exec $server k3s token create --ttl=5m --description=Test > $TEST_DIR/metadata/secret
}
export -f agent-pre-hook

start-test() {
    echo "Cluster is up with ephemeral join token"
}
export -f start-test

# --- create a basic cluster with an agent joined using the ephemeral token and check for functionality
LABEL=BOOTSTRAP-TOKEN run-test

cleanup-test-env
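
For reference, the short-lived token minted above joins an agent the same way a static cluster secret would; a sketch of the equivalent manual join (server URL is a placeholder):

    k3s agent --server https://k3s-server-1:6443 \
        --token $(cat $TEST_DIR/metadata/secret)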


@@ -1,50 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"

# -- This test runs in docker mounting the docker socket,
# -- so we can't directly mount files into the test containers. Instead we have to
# -- run a dummy container with a volume, copy files into that volume, and then
# -- share it with the other containers that need the file.
cluster-pre-hook() {
    mkdir -p $TEST_DIR/pause/0/metadata
    local testID=$(basename $TEST_DIR)
    local name=$(echo "k3s-pause-0-${testID,,}" | tee $TEST_DIR/pause/0/metadata/name)
    export SERVER_DOCKER_ARGS="--mount type=volume,src=$name,dst=/var/lib/rancher/k3s/server/tls"
    docker run \
        -d --name $name \
        --hostname $name \
        ${SERVER_DOCKER_ARGS} \
        rancher/mirrored-pause:3.6 \
        >/dev/null
    DATA_DIR="$TEST_DIR/pause/0/k3s" ./contrib/util/generate-custom-ca-certs.sh
    docker cp "$TEST_DIR/pause/0/k3s" $name:/var/lib/rancher
}
export -f cluster-pre-hook

start-test() {
    echo "Cluster is up with custom CA certs"
}
export -f start-test

test-cleanup-hook(){
    local testID=$(basename $TEST_DIR)
    docker volume ls -q | grep -F ${testID,,} | xargs -r docker volume rm
}
export -f test-cleanup-hook

# --- create a basic cluster and check for functionality
LABEL=CUSTOM-CA-CERTS run-test

cleanup-test-env
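
A quick manual check that the generated CA is actually in use, not part of the test itself, is to pull the server CA out of the container (path per the volume mount above) and read its subject and issuer:

    docker cp $(cat $TEST_DIR/servers/1/metadata/name):/var/lib/rancher/k3s/server/tls/server-ca.crt . \
        && openssl x509 -noout -subject -issuer -in server-ca.crt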


@@ -1,49 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"
export SERVER_ARGS="--node-taint=CriticalAddonsOnly=true:NoExecute"

start-test() {
    echo "Cluster is up"
}
export -f start-test

# --- suppress test failures if the agent is intentionally incompatible with the server version
test-post-hook() {
    if [[ $1 -eq 0 ]]; then
        return
    fi
    dump-logs skip-output
    grep -sqF 'incompatible down-level server detected' $TEST_DIR/agents/*/logs/system.log
}
export -f test-post-hook

REPO=${REPO:-rancher}
IMAGE_NAME=${IMAGE_NAME:-k3s}
PREVIOUS_CHANNEL=$(echo ${VERSION_K8S} | awk -F. '{print "v1." ($2 - 1)}')
PREVIOUS_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/${PREVIOUS_CHANNEL} -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')
STABLE_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/stable -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')
LATEST_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/latest -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')

# --- create a basic cluster to test for compat with the previous minor version of the server and agent
K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${PREVIOUS_VERSION} LABEL=PREVIOUS-SERVER run-test
K3S_IMAGE_AGENT=${REPO}/${IMAGE_NAME}:${PREVIOUS_VERSION} LABEL=PREVIOUS-AGENT run-test

# --- create a basic cluster to test for compat with the stable version of the server and agent
K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${STABLE_VERSION} LABEL=STABLE-SERVER run-test
K3S_IMAGE_AGENT=${REPO}/${IMAGE_NAME}:${STABLE_VERSION} LABEL=STABLE-AGENT run-test

# --- create a basic cluster to test for compat with the latest version of the server and agent
K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${LATEST_VERSION} LABEL=LATEST-SERVER run-test
K3S_IMAGE_AGENT=${REPO}/${IMAGE_NAME}:${LATEST_VERSION} LABEL=LATEST-AGENT run-test

cleanup-test-env
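
The three version lookups above all use the same trick: the k3s channel server replies with an HTTP redirect to the matching GitHub release, the tag is taken from the last path element, and '+' is rewritten to '-' to form a valid image tag. Standalone, the lookup looks like this (the printed URL is illustrative):

    curl -s https://update.k3s.io/v1-release/channels/stable -o /dev/null -w '%{redirect_url}\n'
    # e.g. https://github.com/k3s-io/k3s/releases/tag/v1.31.4+k3s1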


@@ -1,116 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=2
export NUM_AGENTS=0
export WAIT_SERVICES="${all_services[@]}"
export SERVER_1_ARGS="--cluster-init"

REPO=${REPO:-rancher}
IMAGE_NAME=${IMAGE_NAME:-k3s}
PREVIOUS_CHANNEL=$(echo ${VERSION_K8S} | awk -F. '{print "v1." ($2 - 1)}')
PREVIOUS_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/${PREVIOUS_CHANNEL} -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')
STABLE_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/stable -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')
LATEST_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/latest -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')

server-post-hook() {
    if [ $1 -eq 1 ]; then
        local url=$(cat $TEST_DIR/servers/1/metadata/url)
        export SERVER_ARGS="${SERVER_ARGS} --server $url"
    fi
}
export -f server-post-hook

start-test() {
    echo "Cluster is up"
}
export -f start-test

# --- create a basic cluster to test joining managed etcd
LABEL="ETCD-JOIN-BASIC" SERVER_ARGS="" run-test

# --- create a basic cluster to test joining a managed etcd cluster with --agent-token set
LABEL="ETCD-JOIN-AGENTTOKEN" SERVER_ARGS="--agent-token ${RANDOM}${RANDOM}${RANDOM}" run-test

# --- create a cluster with three etcd-only servers, two control-plane-only servers, and one agent
server-post-hook() {
    if [ $1 -eq 1 ]; then
        local url=$(cat $TEST_DIR/servers/1/metadata/url)
        export SERVER_ARGS="${SERVER_ARGS} --server $url"
    fi
}
export -f server-post-hook
LABEL="ETCD-SPLIT-ROLE" NUM_AGENTS=1 KUBECONFIG_SERVER=4 NUM_SERVERS=5 \
    SERVER_1_ARGS="--disable-apiserver --disable-controller-manager --disable-scheduler --cluster-init" \
    SERVER_2_ARGS="--disable-apiserver --disable-controller-manager --disable-scheduler" \
    SERVER_3_ARGS="--disable-apiserver --disable-controller-manager --disable-scheduler" \
    SERVER_4_ARGS="--disable-etcd" \
    SERVER_5_ARGS="--disable-etcd" \
    run-test

# The following tests deploy clusters of mixed versions. The traefik helm chart may not deploy
# correctly until all servers have been upgraded to the same release, so don't wait for it.
all_services=(
    coredns
    local-path-provisioner
    metrics-server
)
export WAIT_SERVICES="${all_services[@]}"

# --- test joining managed etcd cluster with stable-version first server and current-build second server
# --- this test is skipped if the second node is down-level, as we don't support adding a down-level server to an existing cluster
server-post-hook() {
    if [ $1 -eq 1 ]; then
        SERVER_1_MINOR=$(awk -F. '{print $2}' <<<${K3S_IMAGE_SERVER})
        SERVER_2_MINOR=$(awk -F. '{print $2}' <<<${K3S_IMAGE})
        if [ $SERVER_1_MINOR -gt $SERVER_2_MINOR ]; then
            echo "First server minor version cannot be higher than second server"
            exit 0
        fi
        local url=$(cat $TEST_DIR/servers/1/metadata/url)
        export SERVER_ARGS="${SERVER_ARGS} --server $url"
        export K3S_IMAGE_SERVER=${K3S_IMAGE}
    fi
}
export -f server-post-hook
LABEL="ETCD-JOIN-STABLE-FIRST" K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${STABLE_VERSION} run-test

# --- test joining managed etcd cluster with latest-version first server and current-build second server
# --- this test is skipped if the second node is down-level, as we don't support adding a down-level server to an existing cluster
server-post-hook() {
    if [ $1 -eq 1 ]; then
        SERVER_1_MINOR=$(awk -F. '{print $2}' <<<${K3S_IMAGE_SERVER})
        SERVER_2_MINOR=$(awk -F. '{print $2}' <<<${K3S_IMAGE})
        if [ $SERVER_1_MINOR -gt $SERVER_2_MINOR ]; then
            echo "First server minor version cannot be higher than second server"
            exit 0
        fi
        local url=$(cat $TEST_DIR/servers/1/metadata/url)
        export SERVER_ARGS="${SERVER_ARGS} --server $url"
        export K3S_IMAGE_SERVER=${K3S_IMAGE}
    fi
}
export -f server-post-hook
LABEL="ETCD-JOIN-LATEST-FIRST" K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${LATEST_VERSION} run-test

# --- test joining a managed etcd cluster with incompatible configuration
test-post-hook() {
    if [[ $1 -eq 0 ]]; then
        return
    fi
    dump-logs skip-output
    grep -sqF 'critical configuration value mismatch' $TEST_DIR/servers/2/logs/system.log
}
export -f test-post-hook
LABEL="ETCD-JOIN-MISMATCH" SERVER_2_ARGS="--cluster-cidr 10.0.0.0/16" run-test

cleanup-test-env
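
The --server flag appended by the post-hook is all that chains each subsequent server into the managed etcd cluster; outside the harness the equivalent invocation would be roughly (URL and token are placeholders):

    k3s server --server https://first-server:6443 --token <cluster-token>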


@@ -1,96 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"
export AGENT_ARGS="--selinux=true \
    --protect-kernel-defaults=true \
    --kubelet-arg=streaming-connection-idle-timeout=5m \
    --kubelet-arg=make-iptables-util-chains=true"
export SERVER_ARGS="--selinux=true \
    --protect-kernel-defaults=true \
    --kubelet-arg=streaming-connection-idle-timeout=5m \
    --kubelet-arg=make-iptables-util-chains=true \
    --secrets-encryption=true \
    --kube-apiserver-arg=audit-log-path=/tmp/audit-log \
    --kube-apiserver-arg=audit-log-maxage=30 \
    --kube-apiserver-arg=audit-log-maxbackup=10 \
    --kube-apiserver-arg=audit-log-maxsize=100 \
    --kube-apiserver-arg=enable-admission-plugins=NodeRestriction,NamespaceLifecycle,ServiceAccount \
    --kube-apiserver-arg=admission-control-config-file=/opt/rancher/k3s/cluster-level-pss.yaml \
    --kube-controller-manager-arg=terminated-pod-gc-threshold=10 \
    --kube-controller-manager-arg=use-service-account-credentials=true"

# -- This test runs in docker mounting the docker socket,
# -- so we can't directly mount files into the test containers. Instead we have to
# -- run a dummy container with a volume, copy files into that volume, and then
# -- share it with the other containers that need the file.
cluster-pre-hook() {
    mkdir -p $TEST_DIR/pause/0/metadata
    local testID=$(basename $TEST_DIR)
    local name=$(echo "k3s-pause-0-${testID,,}" | tee $TEST_DIR/pause/0/metadata/name)
    export SERVER_DOCKER_ARGS="--mount type=volume,src=$name,dst=/opt/rancher/k3s"
    docker run \
        -d --name $name \
        --hostname $name \
        ${SERVER_DOCKER_ARGS} \
        rancher/mirrored-pause:3.6 \
        >/dev/null
    docker cp scripts/hardened/cluster-level-pss.yaml $name:/opt/rancher/k3s/cluster-level-pss.yaml
}
export -f cluster-pre-hook

# -- deploy and wait for a daemonset to run on all nodes, then wait a couple more
# -- seconds for traefik to see the service endpoints before testing.
start-test() {
    find ./scripts/hardened/ -name 'hardened-k3s-*.yaml' -printf '-f\0%p\0' | xargs -tr0 kubectl create
    kubectl rollout status daemonset/example --watch --timeout=5m
    sleep 15
    verify-ingress
    verify-nodeport
}
export -f start-test

test-cleanup-hook(){
    local testID=$(basename $TEST_DIR)
    docker volume ls -q | grep -F ${testID,,} | xargs -r docker volume rm
}
export -f test-cleanup-hook

# -- confirm we can make a request through the ingress
verify-ingress() {
    local ips=$(cat $TEST_DIR/{servers,agents}/*/metadata/ip)
    local schemes="http https"
    for ip in $ips; do
        for scheme in $schemes; do
            curl -vksf -H 'Host: example.com' ${scheme}://${ip}/
        done
    done
}
export -f verify-ingress

# -- confirm we can make a request through the nodeport service
verify-nodeport() {
    local ips=$(cat $TEST_DIR/{servers,agents}/*/metadata/ip)
    local ports=$(kubectl get service/example -o 'jsonpath={.spec.ports[*].nodePort}')
    for ip in $ips; do
        for port in $ports; do
            curl -vksf -H 'Host: example.com' http://${ip}:${port}
        done
    done
}
export -f verify-nodeport

# --- create a basic cluster and check for functionality
LABEL=HARDENED run-test

cleanup-test-env
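
Since the server args above route apiserver audit logging to /tmp/audit-log, a one-line follow-up, not part of the original test, confirms events are flowing (each audit log line is a JSON event with a verb field; assumes the node image provides tail):

    docker exec $(cat $TEST_DIR/servers/1/metadata/name) \
        tail -n1 /tmp/audit-log | jq -r .verb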


@@ -1,136 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"
export SERVER_ARGS="--node-taint=CriticalAddonsOnly=true:NoExecute"

# ---
cluster-pre-hook() {
    export SERVER_ARGS="${SERVER_ARGS}
        --snapshotter=stargz
    "
    export AGENT_ARGS="${AGENT_ARGS}
        --snapshotter=stargz
    "
}
export -f cluster-pre-hook

# ---
start-test() {
    local REMOTE_SNAPSHOT_LABEL="containerd.io/snapshot/remote"
    local TEST_IMAGE="ghcr.io/stargz-containers/k3s-test-ubuntu:20.04-esgz"
    local TEST_POD_NAME=testpod-$(head /dev/urandom | tr -dc a-z0-9 | head -c 10)
    local TEST_CONTAINER_NAME=testcontainer-$(head /dev/urandom | tr -dc a-z0-9 | head -c 10)

    # Create the target Pod
    cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: ${TEST_POD_NAME}
spec:
  containers:
  - name: ${TEST_CONTAINER_NAME}
    image: ${TEST_IMAGE}
    command: ["sleep"]
    args: ["infinity"]
EOF
    wait-for-pod "${TEST_POD_NAME}"

    # Check if all layers are remote snapshots
    NODE=$(kubectl get pods "${TEST_POD_NAME}" -ojsonpath='{.spec.nodeName}')
    LAYER=$(get-topmost-layer "${NODE}" "${TEST_CONTAINER_NAME}")
    LAYERSNUM=0
    for (( ; ; )) ; do
        LAYER=$(docker exec -i "${NODE}" ctr --namespace="k8s.io" snapshot --snapshotter=stargz info "${LAYER}" | jq -r '.Parent')
        if [ "${LAYER}" == "null" ] ; then
            break
        elif [ ${LAYERSNUM} -gt 100 ] ; then
            echo "testing image contains too many layers > 100"
            return 1
        fi
        ((LAYERSNUM+=1))
        LABEL=$(docker exec -i "${NODE}" ctr --namespace="k8s.io" snapshots --snapshotter=stargz info "${LAYER}" \
            | jq -r ".Labels.\"${REMOTE_SNAPSHOT_LABEL}\"")
        echo "Checking layer ${LAYER} : ${LABEL}"
        if [ "${LABEL}" == "null" ] ; then
            echo "layer ${LAYER} isn't a remote snapshot"
            return 1
        fi
    done
    if [ ${LAYERSNUM} -eq 0 ] ; then
        echo "cannot get layers"
        return 1
    fi
    return 0
}
export -f start-test

wait-for-pod() {
    local POD_NAME="${1}"
    if [ "${POD_NAME}" == "" ] ; then
        return 1
    fi
    IDX=0
    DEADLINE=120
    for (( ; ; )) ; do
        STATUS=$(kubectl get pods "${POD_NAME}" -o 'jsonpath={..status.containerStatuses[0].state.running.startedAt}${..status.containerStatuses[0].state.waiting.reason}')
        echo "Status: ${STATUS}"
        STARTEDAT=$(echo "${STATUS}" | cut -f 1 -d '$')
        if [ "${STARTEDAT}" != "" ] ; then
            echo "Pod created"
            break
        elif [ ${IDX} -gt ${DEADLINE} ] ; then
            echo "Deadline exceeded waiting for pod creation"
            return 1
        fi
        ((IDX+=1))
        sleep 1
    done
    return 0
}
export -f wait-for-pod

get-topmost-layer() {
    local NODE="${1}"
    local CONTAINER="${2}"
    local TARGET_CONTAINER=
    if [ "${NODE}" == "" ] || [ "${CONTAINER}" == "" ] ; then
        return 1
    fi
    for (( RETRY=1; RETRY<=50; RETRY++ )) ; do
        TARGET_CONTAINER=$(docker exec -i "${NODE}" ctr --namespace="k8s.io" c ls -q labels."io.kubernetes.container.name"=="${CONTAINER}" | sed -n 1p)
        if [ "${TARGET_CONTAINER}" != "" ] ; then
            break
        fi
        sleep 3
    done
    if [ "${TARGET_CONTAINER}" == "" ] ; then
        return 1
    fi
    LAYER=$(docker exec -i "${NODE}" ctr --namespace="k8s.io" c info "${TARGET_CONTAINER}" | jq -r '.SnapshotKey')
    echo "${LAYER}"
}
export -f get-topmost-layer

# --- create a basic cluster and check for lazy pulling
LABEL=LAZYPULL run-test

cleanup-test-env
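
Any single layer can be spot-checked for the same label by hand with the command the loop uses (node name and snapshot key are placeholders):

    docker exec -i $NODE ctr --namespace=k8s.io snapshots --snapshotter=stargz info $LAYER \
        | jq -r '.Labels."containerd.io/snapshot/remote"'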


@@ -1,97 +0,0 @@
#!/bin/bash

all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"

REPO=${REPO:-rancher}
IMAGE_NAME=${IMAGE_NAME:-k3s}
CURRENT_CHANNEL=$(echo ${VERSION_K8S} | awk -F. '{print "v1." $2}')
CURRENT_VERSION=$(curl -s https://update.k3s.io/v1-release/channels/${CURRENT_CHANNEL} -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')
if [ -z "${CURRENT_VERSION}" ]; then
    CURRENT_VERSION=${VERSION_TAG}
fi
export K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${CURRENT_VERSION}${SUFFIX}
export K3S_IMAGE_AGENT=${REPO}/${IMAGE_NAME}:${CURRENT_VERSION}${SUFFIX}

server-pre-hook(){
    local testID=$(basename $TEST_DIR)
    export SERVER_DOCKER_ARGS="\
        --mount type=volume,src=k3s-server-$1-${testID,,}-rancher,dst=/var/lib/rancher/k3s \
        --mount type=volume,src=k3s-server-$1-${testID,,}-log,dst=/var/log \
        --mount type=volume,src=k3s-server-$1-${testID,,}-etc,dst=/etc/rancher"
}
export -f server-pre-hook

agent-pre-hook(){
    local testID=$(basename $TEST_DIR)
    export AGENT_DOCKER_ARGS="\
        --mount type=volume,src=k3s-agent-$1-${testID,,}-rancher,dst=/var/lib/rancher/k3s \
        --mount type=volume,src=k3s-agent-$1-${testID,,}-log,dst=/var/log \
        --mount type=volume,src=k3s-agent-$1-${testID,,}-etc,dst=/etc/rancher"
}
export -f agent-pre-hook

start-test() {
    # Create a pod and print the version before upgrading
    kubectl get node -o wide
    kubectl create -f scripts/airgap/volume-test.yaml

    # Add post-hook sleeps to give the kubelet time to update the version after startup.
    # Server gets an extra 60 seconds to handle the metrics-server service being unavailable:
    # https://github.com/kubernetes/kubernetes/issues/120739
    server-post-hook(){
        sleep 75
    }
    export -f server-post-hook
    agent-post-hook(){
        sleep 15
    }
    export -f agent-post-hook

    # Switch the image back to the current build, delete the node containers, and re-provision with the same datastore volumes
    unset K3S_IMAGE_SERVER
    unset K3S_IMAGE_AGENT
    if [ $NUM_AGENTS -gt 0 ]; then
        for i in $(seq 1 $NUM_AGENTS); do
            docker rm -f -v $(cat $TEST_DIR/agents/$i/metadata/name)
            rm -rf $TEST_DIR/agents/$i
        done
    fi
    for i in $(seq 1 $NUM_SERVERS); do
        docker rm -f -v $(cat $TEST_DIR/servers/$i/metadata/name)
        rm -rf $TEST_DIR/servers/$i
    done
    provision-cluster

    # Confirm that the nodes are running the current build and that the pod we created earlier is still there
    . ./scripts/version.sh || true
    verify-valid-versions $(cat $TEST_DIR/servers/1/metadata/name)
    kubectl get pod -n kube-system volume-test -o wide
    if ! kubectl get node -o wide | grep -qF $VERSION; then
        echo "Expected version $VERSION not found in node list"
        return 1
    fi
}
export -f start-test

test-cleanup-hook(){
    local testID=$(basename $TEST_DIR)
    docker volume ls -q | grep -F ${testID,,} | xargs -r docker volume rm
}
export -f test-cleanup-hook

# --- create a single-node cluster from the latest release, then restart the containers with the current build
LABEL=UPGRADE run-test

cleanup-test-env
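
The whole upgrade flow leans on docker named volumes outliving their containers: docker rm -v removes only anonymous volumes, so the datastore mounted at /var/lib/rancher/k3s carries straight over to the re-provisioned nodes. A standalone illustration of that behavior:

    docker volume create demo-vol >/dev/null
    docker run --rm -v demo-vol:/data alpine sh -c 'echo kept > /data/f'
    docker run --rm -v demo-vol:/data alpine cat /data/f   # prints: kept
    docker volume rm demo-vol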