Merge pull request #7105 from prasadkatti/add_desc_node_to_minikube_logs
Add kubectl desc nodes to minikube logs
commit cf887e7a1a
@@ -95,18 +95,18 @@ var logsCmd = &cobra.Command{
 			exit.WithError("Unable to get runtime", err)
 		}
 		if followLogs {
-			err := logs.Follow(cr, bs, runner)
+			err := logs.Follow(cr, bs, *cfg, runner)
 			if err != nil {
 				exit.WithError("Follow", err)
 			}
 			return
 		}
 		if showProblems {
-			problems := logs.FindProblems(cr, bs, runner)
+			problems := logs.FindProblems(cr, bs, *cfg, runner)
 			logs.OutputProblems(problems, numberOfProblems)
 			return
 		}
-		err = logs.Output(cr, bs, runner, numberOfLines)
+		err = logs.Output(cr, bs, *cfg, runner, numberOfLines)
 		if err != nil {
 			exit.WithError("Error getting machine logs", err)
 		}
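This hunk threads the loaded cluster config into the three `logs` entry points. A minimal sketch of the pattern, using hypothetical `ClusterConfig` and `follow` stand-ins rather than the real minikube types:

package main

import "fmt"

// ClusterConfig is a stand-in for minikube's config.ClusterConfig; only
// enough of it is sketched here to show the parameter threading.
type ClusterConfig struct {
	KubernetesVersion string
}

// follow stands in for logs.Follow, which now receives the cluster config
// by value so it can build version-specific log commands.
func follow(cfg ClusterConfig) error {
	fmt.Printf("following logs for Kubernetes %s\n", cfg.KubernetesVersion)
	return nil
}

func main() {
	cfg := &ClusterConfig{KubernetesVersion: "v1.18.0"} // hypothetical version
	// Callers hold a *ClusterConfig and dereference it at the call site,
	// mirroring the `*cfg` arguments in the hunk above.
	if err := follow(*cfg); err != nil {
		fmt.Println("follow:", err)
	}
}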
@@ -44,7 +44,7 @@ type Bootstrapper interface {
 	UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error
 	GenerateToken(config.ClusterConfig) (string, error)
 	// LogCommands returns a map of log type to a command which will display that log.
-	LogCommands(LogOptions) map[string]string
+	LogCommands(config.ClusterConfig, LogOptions) map[string]string
 	SetupCerts(config.KubernetesConfig, config.Node) error
 	GetKubeletStatus() (string, error)
 	GetAPIServerStatus(net.IP, int) (string, error)
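Adding a parameter to an interface method is a breaking change: every implementation has to move in the same commit or the build fails, which is why this diff touches the kubeadm bootstrapper below as well. A toy illustration, using stand-in types rather than the real ones:

package main

import "fmt"

// LogOptions and ClusterConfig are stand-ins for the minikube types.
type LogOptions struct {
	Lines  int
	Follow bool
}

type ClusterConfig struct {
	KubernetesVersion string
}

// Bootstrapper sketches only the slice of the interface touched here.
type Bootstrapper interface {
	LogCommands(ClusterConfig, LogOptions) map[string]string
}

// kubeadm is a toy implementation: if its method signature drifted from
// the interface, the assignment in main would fail to compile.
type kubeadm struct{}

func (k kubeadm) LogCommands(cfg ClusterConfig, o LogOptions) map[string]string {
	return map[string]string{"kubelet": "sudo journalctl -u kubelet"}
}

func main() {
	var bs Bootstrapper = kubeadm{}
	fmt.Println(bs.LogCommands(ClusterConfig{}, LogOptions{Lines: 60}))
}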
@@ -37,6 +37,7 @@ import (
 	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/command"
+	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/cruntime"
 	"k8s.io/minikube/pkg/minikube/logs"
 )
@@ -45,7 +46,7 @@ import (
 const minLogCheckTime = 30 * time.Second
 
 // WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't
-func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, timeout time.Duration) error {
+func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error {
 	glog.Infof("waiting for apiserver process to appear ...")
 	err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) {
 		if time.Since(start) > timeout {
@@ -53,7 +54,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 		}
 
 		if time.Since(start) > minLogCheckTime {
-			announceProblems(r, bs, cr)
+			announceProblems(r, bs, cfg, cr)
 			time.Sleep(kconst.APICallRetryInterval * 5)
 		}
@@ -142,7 +143,7 @@ func podStatusMsg(pod core.Pod) string {
 }
 
 // WaitForSystemPods verifies essential pods for running kurnetes is running
-func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
+func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
 	glog.Info("waiting for kube-system pods to appear ...")
 	pStart := time.Now()
@@ -151,7 +152,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm
 			return false, fmt.Errorf("cluster wait timed out during pod check")
 		}
 		if time.Since(start) > minLogCheckTime {
-			announceProblems(r, bs, cr)
+			announceProblems(r, bs, cfg, cr)
 			time.Sleep(kconst.APICallRetryInterval * 5)
 		}
@@ -179,7 +180,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm
 }
 
 // WaitForHealthyAPIServer waits for api server status to be running
-func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error {
+func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error {
 	glog.Infof("waiting for apiserver healthz status ...")
 	hStart := time.Now()
@@ -189,7 +190,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 		}
 
 		if time.Since(start) > minLogCheckTime {
-			announceProblems(r, bs, cr)
+			announceProblems(r, bs, cfg, cr)
 			time.Sleep(kconst.APICallRetryInterval * 5)
 		}
@@ -212,8 +213,8 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 }
 
 // announceProblems checks for problems, and slows polling down if any are found
-func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner) {
-	problems := logs.FindProblems(r, bs, cr)
+func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner) {
+	problems := logs.FindProblems(r, bs, cfg, cr)
 	if len(problems) > 0 {
 		logs.OutputProblems(problems, 5)
 		time.Sleep(kconst.APICallRetryInterval * 15)
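All three wait helpers share one shape: poll frequently, and once `minLogCheckTime` has elapsed, scan the logs for problems and slow the loop down when any are found, so a wedged cluster is not hammered. A self-contained sketch of that backoff pattern; the constants and the problem scan are stand-ins, not the real minikube values:

package main

import (
	"fmt"
	"time"
)

const (
	minLogCheckTime  = 30 * time.Second
	apiRetryInterval = 500 * time.Millisecond // stand-in for kconst.APICallRetryInterval
)

// findProblems stands in for logs.FindProblems.
func findProblems() map[string][]string {
	return map[string][]string{"kubelet": {"node not ready"}} // hypothetical hit
}

// announceProblems mirrors the helper in the hunk above: report anything
// found, then sleep so the caller's poll loop backs off.
func announceProblems() {
	if problems := findProblems(); len(problems) > 0 {
		fmt.Println("problems:", problems)
		time.Sleep(apiRetryInterval * 15)
	}
}

func main() {
	start := time.Now()
	deadline := start.Add(2 * time.Second) // short timeout for the demo
	for time.Now().Before(deadline) {
		// ... probe the apiserver here ...
		if time.Since(start) > minLogCheckTime {
			announceProblems() // only kicks in on long waits
		}
		time.Sleep(apiRetryInterval)
	}
	fmt.Println("timed out")
}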
@@ -110,7 +110,7 @@ func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, port int) (string, error) {
 }
 
 // LogCommands returns a map of log type to a command which will display that log.
-func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string {
+func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogOptions) map[string]string {
 	var kubelet strings.Builder
 	kubelet.WriteString("sudo journalctl -u kubelet")
 	if o.Lines > 0 {
@@ -128,9 +128,15 @@ func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string
 	if o.Lines > 0 {
 		dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines))
 	}
 
+	describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s",
+		path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"),
+		path.Join(vmpath.GuestPersistentDir, "kubeconfig"))
+
 	return map[string]string{
-		"kubelet": kubelet.String(),
-		"dmesg":   dmesg.String(),
+		"kubelet":        kubelet.String(),
+		"dmesg":          dmesg.String(),
+		"describe nodes": describeNodes,
 	}
 }
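This is the heart of the PR: a new "describe nodes" log source that runs the version-matched `kubectl` shipped inside the guest against the guest's kubeconfig. Assuming `vmpath.GuestPersistentDir` is `/var/lib/minikube` and a hypothetical Kubernetes version, the generated command string looks like this:

package main

import (
	"fmt"
	"path"
)

// guestPersistentDir mirrors vmpath.GuestPersistentDir; the value used
// here is an assumption for illustration.
const guestPersistentDir = "/var/lib/minikube"

func main() {
	kubernetesVersion := "v1.18.0" // hypothetical cfg.KubernetesConfig.KubernetesVersion
	describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s",
		path.Join(guestPersistentDir, "binaries", kubernetesVersion, "kubectl"),
		path.Join(guestPersistentDir, "kubeconfig"))
	fmt.Println(describeNodes)
	// sudo /var/lib/minikube/binaries/v1.18.0/kubectl describe node -A \
	//   --kubeconfig=/var/lib/minikube/kubeconfig
}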
@@ -287,7 +293,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 		return err
 	}
 
-	if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil {
+	if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil {
 		return err
 	}
@@ -296,7 +302,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 		return err
 	}
 
-	if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil {
+	if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil {
 		return err
 	}
@@ -305,7 +311,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 		return errors.Wrap(err, "get k8s client")
 	}
 
-	if err := kverify.WaitForSystemPods(cr, k, k.c, c, start, timeout); err != nil {
+	if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil {
 		return errors.Wrap(err, "waiting for system pods")
 	}
 	return nil
@@ -411,11 +417,11 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
 	}
 
 	// We must ensure that the apiserver is healthy before proceeding
-	if err := kverify.WaitForAPIServerProcess(cr, k, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
+	if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
 		return errors.Wrap(err, "apiserver healthz")
 	}
 
-	if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
+	if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
 		return errors.Wrap(err, "system pods")
 	}
@@ -31,6 +31,7 @@ import (
 	"github.com/pkg/errors"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/command"
+	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/cruntime"
 	"k8s.io/minikube/pkg/minikube/out"
 )
@@ -87,9 +88,9 @@ type logRunner interface {
 const lookBackwardsCount = 400
 
 // Follow follows logs from multiple files in tail(1) format
-func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) error {
+func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) error {
 	cs := []string{}
-	for _, v := range logCommands(r, bs, 0, true) {
+	for _, v := range logCommands(r, bs, cfg, 0, true) {
 		cs = append(cs, v+" &")
 	}
 	cs = append(cs, "wait")
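`Follow` turns each log command into a background job and appends a shell `wait`, so a single remote shell session streams every source concurrently. A sketch of that assembly; the commands and the final join separator are illustrative:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical follow-mode log commands; in minikube these come from
	// bs.LogCommands and the container runtime.
	cmds := []string{
		"sudo journalctl -u kubelet -f",
		"sudo dmesg --follow",
	}
	cs := []string{}
	for _, v := range cmds {
		cs = append(cs, v+" &") // background each streaming tail...
	}
	cs = append(cs, "wait") // ...then block until every one of them exits
	fmt.Println(strings.Join(cs, " "))
	// sudo journalctl -u kubelet -f & sudo dmesg --follow & wait
}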
@@ -109,9 +110,9 @@ func IsProblem(line string) bool {
 }
 
 // FindProblems finds possible root causes among the logs
-func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) map[string][]string {
+func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) map[string][]string {
 	pMap := map[string][]string{}
-	cmds := logCommands(r, bs, lookBackwardsCount, false)
+	cmds := logCommands(r, bs, cfg, lookBackwardsCount, false)
 	for name := range cmds {
 		glog.Infof("Gathering logs for %s ...", name)
 		var b bytes.Buffer
@@ -153,8 +154,8 @@ func OutputProblems(problems map[string][]string, maxLines int) {
 }
 
 // Output displays logs from multiple sources in tail(1) format
-func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Runner, lines int) error {
-	cmds := logCommands(r, bs, lines, false)
+func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int) error {
+	cmds := logCommands(r, bs, cfg, lines, false)
 	cmds["kernel"] = "uptime && uname -a && grep PRETTY /etc/os-release"
 
 	names := []string{}
@@ -191,8 +192,8 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run
 }
 
 // logCommands returns a list of commands that would be run to receive the anticipated logs
-func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, follow bool) map[string]string {
-	cmds := bs.LogCommands(bootstrapper.LogOptions{Lines: length, Follow: follow})
+func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string {
+	cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow})
 	for _, pod := range importantPods {
 		ids, err := r.ListContainers(cruntime.ListOptions{Name: pod})
 		if err != nil {
@@ -211,5 +212,6 @@ func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, f
 	}
 	cmds[r.Name()] = r.SystemLogCmd(length)
+	cmds["container status"] = cruntime.ContainerStatusCommand()
 
 	return cmds
 }
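`logCommands` is where all the sources meet: the bootstrapper's commands (now including "describe nodes"), per-pod container logs, the runtime's own system log, and the newly added container-status command. A toy sketch of the resulting map; every command string here is illustrative, including the container-status stand-in:

package main

import "fmt"

func main() {
	// Keys become the per-source section names in `minikube logs` output.
	cmds := map[string]string{
		"kubelet":          "sudo journalctl -u kubelet | tail -n 60",
		"dmesg":            "sudo dmesg | tail -n 60",
		"describe nodes":   "sudo /var/lib/minikube/binaries/v1.18.0/kubectl describe node -A --kubeconfig=/var/lib/minikube/kubeconfig",
		"container status": "sudo crictl ps -a", // stand-in for cruntime.ContainerStatusCommand()
	}
	for name, c := range cmds {
		fmt.Printf("==> %s <==\n$ %s\n\n", name, c)
	}
}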
@@ -112,7 +112,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
 		bs = setupKubeAdm(machineAPI, cc, n)
 		err = bs.StartCluster(cc)
 		if err != nil {
-			exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))
+			exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, cc, mRunner))
 		}
 	} else {
 		bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)