Merge pull request #13072 from klaases/kstop1

Stop K8s if running, when the '--no-kubernetes' flag is used

commit 30afddc6c1
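This change teaches `minikube start --no-kubernetes` to stop an already-running Kubernetes cluster in the existing profile instead of leaving it up. The decision is made in provisionWithDriver, carried through a new StopK8s field on node.Starter, and executed by a new kubeadm.StopKubernetes helper that force-stops the kubelet and the kube-system containers. DeleteCluster is refactored to reuse the same helper, and the no-Kubernetes integration test gains start-with-Kubernetes and start-with-stop coverage.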

@@ -295,6 +295,12 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 		updateDriver(driverName)
 	}

+	// Check whether we may need to stop Kubernetes.
+	var stopk8s bool
+	if existing != nil && viper.GetBool(noKubernetes) {
+		stopk8s = true
+	}
+
 	k8sVersion := getKubernetesVersion(existing)
 	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
 	if err != nil {

@@ -337,6 +343,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 	return node.Starter{
 		Runner:         mRunner,
 		PreExists:      preExists,
+		StopK8s:        stopk8s,
 		MachineAPI:     mAPI,
 		Host:           host,
 		ExistingAddons: existingAddons,
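The gate added above reduces to the conjunction of two conditions: a cluster for this profile already exists, and the user passed --no-kubernetes. A minimal standalone sketch of that decision (plain booleans stand in for the `existing != nil` pointer check and the viper flag lookup; shouldStopK8s is a hypothetical name, not part of the PR):

package main

import "fmt"

// shouldStopK8s mirrors the gate in provisionWithDriver: stop Kubernetes
// only when a cluster already exists AND --no-kubernetes was requested.
func shouldStopK8s(existing, noKubernetes bool) bool {
	return existing && noKubernetes
}

func main() {
	fmt.Println(shouldStopK8s(true, true))  // true: running cluster, flag set -> stop it
	fmt.Println(shouldStopK8s(false, true)) // false: fresh profile, nothing to stop
}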

@@ -801,6 +801,38 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
 	return joinCmd, nil
 }

+// StopKubernetes attempts to stop existing kubernetes.
+func StopKubernetes(runner command.Runner, cr cruntime.Manager) {
+	// Verify that Kubernetes is still running.
+	stk := kverify.ServiceStatus(runner, "kubelet")
+	if stk.String() != "Running" {
+		return
+	}
+
+	out.Infof("Kubernetes: Stopping ...")
+
+	// Force stop "Kubelet".
+	if err := sysinit.New(runner).ForceStop("kubelet"); err != nil {
+		klog.Warningf("stop kubelet: %v", err)
+	}
+
+	// Stop each Kubernetes container.
+	containers, err := cr.ListContainers(cruntime.ListContainersOptions{Namespaces: []string{"kube-system"}})
+	if err != nil {
+		klog.Warningf("unable to list kube-system containers: %v", err)
+	}
+	if len(containers) > 0 {
+		klog.Warningf("found %d kube-system containers to stop", len(containers))
+		if err := cr.StopContainers(containers); err != nil {
+			klog.Warningf("error stopping containers: %v", err)
+		}
+	}
+
+	// Verify that Kubernetes has stopped.
+	stk = kverify.ServiceStatus(runner, "kubelet")
+	out.Infof("Kubernetes: {{.status}}", out.V{"status": stk.String()})
+}
+
 // DeleteCluster removes the components that were started earlier
 func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
 	cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
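StopKubernetes takes the command runner and a container-runtime manager, so callers build the cruntime.Manager themselves. A sketch of the call pattern, assuming minikube's internal packages (it only compiles inside the minikube module, and stopIfRunning is a hypothetical wrapper, not part of the PR):

// Hypothetical helper showing the call pattern the PR establishes: build a
// container-runtime manager for the cluster, then delegate to
// kubeadm.StopKubernetes, mirroring handleNoKubernetes and DeleteCluster.
package example

import (
	"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/cruntime"
)

func stopIfRunning(runner command.Runner, k8s config.KubernetesConfig) error {
	cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: runner, Socket: k8s.CRISocket})
	if err != nil {
		return err
	}
	kubeadm.StopKubernetes(runner, cr) // best-effort: logs warnings, never fails the caller
	return nil
}

Note that StopKubernetes returns nothing: every failure is logged with klog.Warningf and the final kubelet status is printed, so a stop problem never aborts the surrounding start or delete.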

@@ -828,21 +860,7 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
 		klog.Warningf("%s: %v", rr.Command(), err)
 	}

-	if err := sysinit.New(k.c).ForceStop("kubelet"); err != nil {
-		klog.Warningf("stop kubelet: %v", err)
-	}
-
-	containers, err := cr.ListContainers(cruntime.ListContainersOptions{Namespaces: []string{"kube-system"}})
-	if err != nil {
-		klog.Warningf("unable to list kube-system containers: %v", err)
-	}
-	if len(containers) > 0 {
-		klog.Warningf("found %d kube-system containers to stop", len(containers))
-		if err := cr.StopContainers(containers); err != nil {
-			klog.Warningf("error stopping containers: %v", err)
-		}
-	}
-
+	StopKubernetes(k.c, cr)
 	return derr
 }
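With the hunk above, DeleteCluster drops its inline copy of the stop sequence (force-stop kubelet, list and stop kube-system containers) and delegates to StopKubernetes, so minikube delete and the --no-kubernetes start path now share a single implementation.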

@@ -44,6 +44,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
+	"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/cni"
 	"k8s.io/minikube/pkg/minikube/command"

@@ -79,6 +80,7 @@ var (
 type Starter struct {
 	Runner         command.Runner
 	PreExists      bool
+	StopK8s        bool
 	MachineAPI     libmachine.API
 	Host           *host.Host
 	Cfg            *config.ClusterConfig

@@ -88,10 +90,14 @@ type Starter struct {
 // Start spins up a guest and starts the Kubernetes node.
 func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
-	var kcs *kubeconfig.Settings
-	if starter.Node.KubernetesVersion == constants.NoKubernetesVersion { // do not bootstrap cluster if --no-kubernetes
-		return kcs, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
+	stop8ks, err := handleNoKubernetes(starter)
+	if err != nil {
+		return nil, err
 	}
+	if stop8ks {
+		return nil, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
+	}

 	// wait for preloaded tarball to finish downloading before configuring runtimes
 	waitCacheRequiredImages(&cacheGroup)

@@ -118,41 +124,13 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 		klog.Errorf("Unable to add host alias: %v", err)
 	}

+	var kcs *kubeconfig.Settings
 	var bs bootstrapper.Bootstrapper
 	if apiServer {
-		// Must be written before bootstrap, otherwise health checks may flake due to stale IP
-		kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name)
+		kcs, bs, err = handleAPIServer(starter, cr, hostIP)
 		if err != nil {
-			return nil, errors.Wrap(err, "Failed to setup kubeconfig")
-		}
-
-		// setup kubeadm (must come after setupKubeconfig)
-		bs = setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node, starter.Runner)
-		err = bs.StartCluster(*starter.Cfg)
-		if err != nil {
-			ExitIfFatal(err)
-			out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner))
 			return nil, err
 		}
-
-		// write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper
-		if err := kubeconfig.Update(kcs); err != nil {
-			return nil, errors.Wrap(err, "Failed kubeconfig update")
-		}
-
-		// scale down CoreDNS from default 2 to 1 replica
-		if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil {
-			klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err)
-		}
-
-		// not running this in a Go func can result in DNS answering taking up to 38 seconds, with the Go func it takes 6-10 seconds
-		go func() {
-			// inject {"host.minikube.internal": hostIP} record into CoreDNS
-			if err := addCoreDNSEntry(starter.Runner, "host.minikube.internal", hostIP.String(), *starter.Cfg); err != nil {
-				klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", "host.minikube.internal", hostIP.String(), err)
-				out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP")
-			}
-		}()
 	} else {
 		bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner)
 		if err != nil {

@@ -238,6 +216,63 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 	return kcs, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
 }

+// handleNoKubernetes handles starting minikube without Kubernetes.
+func handleNoKubernetes(starter Starter) (bool, error) {
+	// Do not bootstrap cluster if --no-kubernetes.
+	if starter.Node.KubernetesVersion == constants.NoKubernetesVersion {
+		// Stop existing Kubernetes node if applicable.
+		if starter.StopK8s {
+			cr, err := cruntime.New(cruntime.Config{Type: starter.Cfg.KubernetesConfig.ContainerRuntime, Runner: starter.Runner, Socket: starter.Cfg.KubernetesConfig.CRISocket})
+			if err != nil {
+				return false, err
+			}
+			kubeadm.StopKubernetes(starter.Runner, cr)
+		}
+		return true, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
+	}
+	return false, nil
+}
+
+// handleAPIServer handles starting the API server.
+func handleAPIServer(starter Starter, cr cruntime.Manager, hostIP net.IP) (*kubeconfig.Settings, bootstrapper.Bootstrapper, error) {
+	var err error
+
+	// Must be written before bootstrap, otherwise health checks may flake due to stale IP.
+	kcs := setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "Failed to setup kubeconfig")
+	}
+
+	// Setup kubeadm (must come after setupKubeconfig).
+	bs := setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node, starter.Runner)
+	err = bs.StartCluster(*starter.Cfg)
+	if err != nil {
+		ExitIfFatal(err)
+		out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner))
+		return nil, bs, err
+	}
+
+	// Write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper.
+	if err := kubeconfig.Update(kcs); err != nil {
+		return nil, bs, errors.Wrap(err, "Failed kubeconfig update")
+	}
+
+	// Scale down CoreDNS from default 2 to 1 replica.
+	if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil {
+		klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err)
+	}
+
+	// Not running this in a Go func can result in DNS answering taking up to 38 seconds, with the Go func it takes 6-10 seconds.
+	go func() {
+		// Inject {"host.minikube.internal": hostIP} record into CoreDNS.
+		if err := addCoreDNSEntry(starter.Runner, "host.minikube.internal", hostIP.String(), *starter.Cfg); err != nil {
+			klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", "host.minikube.internal", hostIP.String(), err)
+			out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP")
+		}
+	}()
+	return kcs, bs, nil
+}
+
 // joinCluster adds new or prepares and then adds existing node to the cluster.
 func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrapper.Bootstrapper) error {
 	start := time.Now()
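Taken together, Start now asks handleNoKubernetes first and returns before any bootstrapping when it reports true. A condensed, runnable model of that control flow (the stand-in functions print instead of calling the real kubeadm.StopKubernetes and config.Write, and the booleans stand in for the sentinel-version check and starter.StopK8s):

package main

import "fmt"

// handleNoKubernetes models the real helper: when the node is pinned to the
// no-Kubernetes sentinel version, optionally stop a running cluster, then
// tell the caller to skip bootstrapping entirely.
func handleNoKubernetes(noK8sRequested, stopK8s bool) (bool, error) {
	if noK8sRequested {
		if stopK8s {
			fmt.Println("stopping existing Kubernetes") // real code: kubeadm.StopKubernetes
		}
		return true, nil // real code also persists the profile via config.Write
	}
	return false, nil
}

func main() {
	skip, err := handleNoKubernetes(true, true)
	if err != nil {
		panic(err)
	}
	if skip {
		fmt.Println("bootstrap skipped")
		return
	}
	fmt.Println("continue with full bootstrap")
}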

@@ -21,6 +21,8 @@ package integration

 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"os/exec"
 	"strings"
 	"testing"

@@ -46,6 +48,8 @@ func TestNoKubernetes(t *testing.T) {
 		validator validateFunc
 	}{
 		{"StartNoK8sWithVersion", validateStartNoK8sWithVersion},
+		{"StartWithK8s", validateStartWithK8S},
+		{"StartWithStopK8s", validateStartWithStopK8s},
 		{"Start", validateStartNoK8S},
 		{"VerifyK8sNotRunning", validateK8SNotRunning},
 		{"ProfileList", validateProfileListNoK8S},
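Ordering matters in this table: StartWithK8s first brings the profile up with Kubernetes so that StartWithStopK8s can re-run start with --no-kubernetes against the same profile and assert that Kubernetes moves to Stopped, read back through the getK8sStatus helper added below.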

@@ -75,6 +79,7 @@ func TestNoKubernetes(t *testing.T) {
 func validateStartNoK8sWithVersion(ctx context.Context, t *testing.T, profile string) {
 	defer PostMortemLogs(t, profile)

+	// docs: start minikube with no kubernetes.
 	args := append([]string{"start", "-p", profile, "--no-kubernetes", "--kubernetes-version=1.20"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
 	if err == nil {

@@ -82,10 +87,52 @@ func validateStartNoK8sWithVersion(ctx context.Context, t *testing.T, profile st
 	}
 }

+// validateStartWithK8S starts a minikube cluster with Kubernetes started/configured.
+func validateStartWithK8S(ctx context.Context, t *testing.T, profile string) {
+	defer PostMortemLogs(t, profile)
+
+	// docs: start minikube with Kubernetes.
+	args := append([]string{"start", "-p", profile}, StartArgs()...)
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
+	if err != nil {
+		t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
+	}
+
+	// docs: return an error if Kubernetes is not running.
+	if k8sStatus := getK8sStatus(ctx, t, profile); k8sStatus != "Running" {
+		t.Errorf("Kubernetes status, got: %s, want: Running", k8sStatus)
+	}
+}
+
+// validateStartWithStopK8s starts a minikube cluster while stopping Kubernetes.
+func validateStartWithStopK8s(ctx context.Context, t *testing.T, profile string) {
+	defer PostMortemLogs(t, profile)
+
+	// docs: start minikube with no Kubernetes.
+	args := append([]string{"start", "-p", profile, "--no-kubernetes"}, StartArgs()...)
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
+	if err != nil {
+		t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
+	}
+
+	// docs: return an error if Kubernetes is not stopped.
+	if k8sStatus := getK8sStatus(ctx, t, profile); k8sStatus != "Stopped" {
+		t.Errorf("Kubernetes status, got: %s, want: Stopped", k8sStatus)
+	}
+
+	// docs: delete minikube profile.
+	args = []string{"delete", "-p", profile}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
+	if err != nil {
+		t.Fatalf("failed to delete minikube profile with args: %q : %v", rr.Command(), err)
+	}
+}
+
 // validateStartNoK8S starts a minikube cluster without kubernetes started/configured
 func validateStartNoK8S(ctx context.Context, t *testing.T, profile string) {
 	defer PostMortemLogs(t, profile)

+	// docs: start minikube with no Kubernetes.
 	args := append([]string{"start", "-p", profile, "--no-kubernetes"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
 	if err != nil {

@@ -137,7 +184,7 @@ func validateProfileListNoK8S(ctx context.Context, t *testing.T, profile string)
 }

-// validateStartNoArgs valides that minikube start with no args works
+// validateStartNoArgs validates that minikube start with no args works.
 func validateStartNoArgs(ctx context.Context, t *testing.T, profile string) {
 	defer PostMortemLogs(t, profile)


@@ -146,5 +193,22 @@ func validateStartNoArgs(ctx context.Context, t *testing.T, profile string) {
 	if err != nil {
 		t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
 	}
+}
+
+// getK8sStatus returns whether Kubernetes is running.
+func getK8sStatus(ctx context.Context, t *testing.T, profile string) string {
+	// Run `minikube status` as JSON output.
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json"))
+	// We expect Kubernetes config to come back as configured, since we started Kubernetes in a previous test.
+	if err != nil && rr.ExitCode != 2 {
+		t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Command(), err)
+	}
+
+	// Unmarshal JSON output.
+	var jsonObject map[string]interface{}
+	err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
+	if err != nil {
+		t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Command(), err)
+	}
+	return fmt.Sprintf("%s", jsonObject["Kubelet"])
 }
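The same probe works outside the test harness. A self-contained sketch using only the standard library (it assumes a minikube binary on PATH and the default profile name, and, like the test above, tolerates exit code 2 rather than treating it as a failure):

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Standalone version of the getK8sStatus approach: run `minikube status`
// with JSON output and read the Kubelet field.
func main() {
	cmd := exec.Command("minikube", "-p", "minikube", "status", "-o", "json")
	out, err := cmd.Output() // stdout is still captured on a non-zero exit
	if err != nil {
		if ee, ok := err.(*exec.ExitError); !ok || ee.ExitCode() != 2 {
			fmt.Println("status failed:", err)
			return
		}
	}
	var status map[string]interface{}
	if err := json.Unmarshal(out, &status); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("Kubelet: %s\n", status["Kubelet"])
}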