none cleanup: force kubeadm, clear /etc/kubernetes, and use sudo for kubectl kill
parent 468182238c
commit 198c1724c7
@@ -158,7 +158,7 @@ fi
 if pgrep kubectl; then
   echo "killing hung kubectl processes ..."
   ps -afe | grep kubectl | grep -v grep || true
-  pgrep kubectl | xargs kill || true
+  pgrep kubectl | ${SUDO_PREFIX} xargs kill || true
 fi

 function cleanup_stale_routes() {
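The patched kill line relies on SUDO_PREFIX being defined earlier in the script; that definition is not part of this diff. A minimal sketch of how such a prefix is commonly set, given purely as an illustration of the assumption:

# Sketch only: SUDO_PREFIX is assumed to be set near the top of the script,
# empty when the job already runs as root, "sudo " otherwise.
SUDO_PREFIX=""
if [[ "$(id -u)" -ne 0 ]]; then
  SUDO_PREFIX="sudo "
fi

# With that in place, the patched line escalates privileges only when needed:
pgrep kubectl | ${SUDO_PREFIX} xargs kill || true

Leaving the expansion unquoted is deliberate here: when SUDO_PREFIX is empty it disappears entirely and xargs runs directly.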
@@ -36,9 +36,11 @@ export KUBECONFIG="/root/.kube/config"

 # "none" driver specific cleanup from previous runs.
 # kubeadm
-sudo kubeadm reset || true
+sudo kubeadm reset -f || true
 # Cleanup data directory
 sudo rm -rf /data/*
+# Cleanup old Kubernetes configs
+sudo rm -rf /etc/kubernetes/*
 # Stop any leftover kubelets
 systemctl is-active --quiet kubelet \
   && echo "stopping kubelet" \
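Put together, the patched cleanup block reads roughly as below. The trailing systemctl stop continuation is an assumption, since the diff is truncated after the echo line:

# "none" driver specific cleanup from previous runs.
# kubeadm: -f skips kubeadm's interactive confirmation prompt.
sudo kubeadm reset -f || true
# Cleanup data directory
sudo rm -rf /data/*
# Cleanup old Kubernetes configs
sudo rm -rf /etc/kubernetes/*
# Stop any leftover kubelets; "systemctl is-active --quiet" exits 0 only when
# the unit is running, so this chain is a no-op on an already clean host.
systemctl is-active --quiet kubelet \
  && echo "stopping kubelet" \
  && sudo systemctl stop kubelet  # assumed continuation, not shown in the diff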
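A quick sanity check that the host really is clean before the next run; these commands are illustrative and not part of the commit:

# None of these checks appear in the patched script; they only verify its effect.
test -z "$(ls -A /etc/kubernetes 2>/dev/null)" && echo "/etc/kubernetes is empty"
systemctl is-active --quiet kubelet || echo "kubelet is not running"
pgrep kubectl >/dev/null || echo "no leftover kubectl processes"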