Add pod logs to 'logs' command, show detected problems during minikube start
parent 6a18ad9bc5
commit 08da5fe3b5

@@ -23,13 +23,25 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
-	cmdUtil "k8s.io/minikube/cmd/util"
+	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/console"
+	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/logs"
 	"k8s.io/minikube/pkg/minikube/machine"
 )
 
+const (
+	// number of problems per log to output
+	numberOfProblems = 5
+)
+
 var (
-	follow bool
+	// followLogs triggers tail -f mode
+	followLogs bool
+	// numberOfLines is how many lines to output, set via -n
+	numberOfLines int
+	// showProblems only shows lines that match known issues
+	showProblems bool
 )
 
 // logsCmd represents the logs command

@@ -38,26 +50,57 @@ var logsCmd = &cobra.Command{
 	Short: "Gets the logs of the running instance, used for debugging minikube, not user code",
 	Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`,
 	Run: func(cmd *cobra.Command, args []string) {
+		cfg, err := config.Load()
+		if err != nil {
+			console.Fatal("Error getting config: %v", err)
+		}
+
 		api, err := machine.NewAPIClient()
 		if err != nil {
 			console.Fatal("Error getting client: %v", err)
 			os.Exit(1)
 		}
 		defer api.Close()
-		clusterBootstrapper, err := GetClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
+
+		h, err := api.Load(config.GetMachineName())
+		if err != nil {
+			glog.Exitf("api load: %v", err)
+		}
+		runner, err := machine.CommandRunner(h)
+		if err != nil {
+			glog.Exitf("command runner: %v", err)
+		}
+		bs, err := GetClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
 		if err != nil {
 			glog.Exitf("Error getting cluster bootstrapper: %v", err)
 		}
 
-		err = clusterBootstrapper.GetClusterLogsTo(follow, os.Stdout)
+		cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: runner})
 		if err != nil {
-			console.Fatal("Error getting machine logs:", err)
-			cmdUtil.MaybeReportErrorAndExit(err)
+			glog.Exitf("Unable to get runtime: %v", err)
 		}
+		if followLogs {
+			err := logs.Follow(cr, bs, runner)
+			if err != nil {
+				console.Failure("output: %v", err)
+			}
+			return
+		}
+		if showProblems {
+			problems := logs.FindProblems(cr, bs, runner)
+			logs.OutputProblems(problems, numberOfProblems)
+			return
+		}
+		err = logs.Output(cr, bs, runner, numberOfLines)
+		if err != nil {
+			console.Failure("output: %v", err)
+		}
 	},
 }
 
 func init() {
-	logsCmd.Flags().BoolVarP(&follow, "follow", "f", false, "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.")
+	logsCmd.Flags().BoolVarP(&followLogs, "follow", "f", false, "Show only the most recent journal entries, and continuously print new entries as they are appended to the journal.")
+	logsCmd.Flags().BoolVar(&showProblems, "problems", false, "Show only log entries which point to known problems")
+	logsCmd.Flags().IntVarP(&numberOfLines, "length", "n", 50, "Number of lines back to go within the log")
 	RootCmd.AddCommand(logsCmd)
 }

@@ -45,6 +45,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/console"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/logs"
 	"k8s.io/minikube/pkg/minikube/machine"
 	pkgutil "k8s.io/minikube/pkg/util"
 	"k8s.io/minikube/pkg/util/kubeconfig"

@@ -186,15 +187,19 @@ func runStart(cmd *cobra.Command, args []string) {
 	if err := saveConfig(config); err != nil {
 		reportErrAndExit("Failed to save config", err)
 	}
+	runner, err := machine.CommandRunner(host)
+	if err != nil {
+		reportErrAndExit("Failed to get command runner", err)
+	}
 
-	configureRuntimes(host)
+	cr := configureRuntimes(host, runner)
 	bs := prepareHostEnvironment(m, config.KubernetesConfig)
 	waitCacheImages(&cacheGroup)
 
 	// The kube config must be update must come before bootstrapping, otherwise health checks may use a stale IP
 	kubeconfig := updateKubeConfig(host, &config)
-	bootstrapCluster(bs, config.KubernetesConfig, preexisting)
-	validateCluster(bs, ip)
+	bootstrapCluster(bs, cr, runner, config.KubernetesConfig, preexisting)
+	validateCluster(bs, cr, runner, ip)
 	configureMounts()
 	if err = LoadCachedImagesInConfigFile(); err != nil {
 		console.Failure("Unable to load cached images from config file.")

@@ -451,12 +456,7 @@ func updateKubeConfig(h *host.Host, c *cfg.Config) *kubeconfig.KubeConfigSetup {
 }
 
 // configureRuntimes does what needs to happen to get a runtime going.
-func configureRuntimes(h *host.Host) {
-	runner, err := machine.CommandRunner(h)
-	if err != nil {
-		reportErrAndExit("Failed to get command runner", err)
-	}
-
+func configureRuntimes(h *host.Host, runner bootstrapper.CommandRunner) cruntime.Manager {
 	config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner}
 	cr, err := cruntime.New(config)
 	if err != nil {

@@ -475,7 +475,7 @@ func configureRuntimes(h *host.Host) {
 	if err != nil {
 		cmdutil.MaybeReportErrorAndExit(err)
 	}
-
+	return cr
 }
 
 // waitCacheImages blocks until the image cache jobs complete

@@ -490,7 +490,7 @@ func waitCacheImages(g *errgroup.Group) {
 }
 
 // bootstrapCluster starts Kubernetes using the chosen bootstrapper
-func bootstrapCluster(bs bootstrapper.Bootstrapper, kc cfg.KubernetesConfig, preexisting bool) {
+func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner bootstrapper.CommandRunner, kc cfg.KubernetesConfig, preexisting bool) {
 	console.OutStyle("pulling", "Pulling images used by Kubernetes %s ...", kc.KubernetesVersion)
 	if err := bs.PullImages(kc); err != nil {
 		fmt.Printf("Unable to pull images, which may be OK: %v", err)

@@ -501,19 +501,19 @@ func bootstrapCluster(bs bootstrapper.Bootstrapper, kc cfg.KubernetesConfig, pre
 	if preexisting {
 		console.OutStyle("restarting", "Relaunching Kubernetes %s using %s ... ", kc.KubernetesVersion, bsName)
 		if err := bs.RestartCluster(kc); err != nil {
-			reportErrAndExit("Error restarting cluster", err)
+			reportProblemsAndExit("Error restarting cluster", err, logs.FindProblems(r, bs, runner))
 		}
 		return
 	}
 
 	console.OutStyle("launch", "Launching Kubernetes %s using %s ... ", kc.KubernetesVersion, bsName)
 	if err := bs.StartCluster(kc); err != nil {
-		reportErrAndExit("Error starting cluster", err)
+		reportProblemsAndExit("Error starting cluster", err, logs.FindProblems(r, bs, runner))
 	}
 }
 
 // validateCluster validates that the cluster is well-configured and healthy
-func validateCluster(bs bootstrapper.Bootstrapper, ip string) {
+func validateCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner bootstrapper.CommandRunner, ip string) {
 	console.OutStyle("verifying-noline", "Verifying component health ...")
 	kStat := func() (err error) {
 		st, err := bs.GetKubeletStatus()

@@ -525,7 +525,7 @@ func validateCluster(bs bootstrapper.Bootstrapper, ip string) {
 	}
 	err := pkgutil.RetryAfter(20, kStat, 3*time.Second)
 	if err != nil {
-		reportErrAndExit("kubelet checks failed", err)
+		reportProblemsAndExit("kubelet checks failed", err, logs.FindProblems(r, bs, runner))
 	}
 	aStat := func() (err error) {
 		st, err := bs.GetApiServerStatus(net.ParseIP(ip))

@@ -538,7 +538,7 @@ func validateCluster(bs bootstrapper.Bootstrapper, ip string) {
 
 	err = pkgutil.RetryAfter(30, aStat, 10*time.Second)
 	if err != nil {
-		reportErrAndExit("apiserver checks failed", err)
+		reportProblemsAndExit("apiserver checks failed", err, logs.FindProblems(r, bs, runner))
 	}
 	console.OutLn("")
 }

@@ -623,9 +623,17 @@ func fatalExit(format string, a ...interface{}) {
 	os.Exit(1)
 }
 
-// reportFatalExit is a shortcut for outputting an error, reporting it, and exiting.
+// reportErrAndExit is a shortcut for outputting an error, reporting it, and exiting.
 func reportErrAndExit(msg string, err error) {
 	console.Fatal(msg+": %v", err)
 	cmdutil.MaybeReportErrorAndExit(err)
 	os.Exit(1)
 }
+
+// reportFatalExit is a shortcut for outputting an error, reporting it, and exiting.
+func reportProblemsAndExit(msg string, err error, problems map[string][]string) {
+	console.Fatal(msg+": %v", err)
+	logs.OutputProblems(problems, 3)
+	cmdutil.MaybeReportErrorAndExit(err)
+	os.Exit(1)
+}

@@ -17,7 +17,6 @@ limitations under the License.
 package bootstrapper
 
 import (
-	"io"
 	"net"
 
 	"k8s.io/minikube/pkg/minikube/config"

@@ -32,7 +31,8 @@ type Bootstrapper interface {
 	UpdateCluster(config.KubernetesConfig) error
 	RestartCluster(config.KubernetesConfig) error
 	DeleteCluster(config.KubernetesConfig) error
-	GetClusterLogsTo(follow bool, out io.Writer) error
+	// LogCommands returns a map of log type to a command which will display that log.
+	LogCommands(int, bool) map[string]string
 	SetupCerts(cfg config.KubernetesConfig) error
 	GetKubeletStatus() (string, error)
 	GetApiServerStatus(net.IP) (string, error)

@@ -21,7 +21,6 @@ import (
 	"crypto"
 	"crypto/tls"
 	"fmt"
-	"io"
 	"net"
 	"net/http"
 	"os"

@@ -121,28 +120,17 @@ func (k *KubeadmBootstrapper) GetApiServerStatus(ip net.IP) (string, error) {
 	return state.Running.String(), nil
 }
 
-// TODO(r2d4): Should this aggregate all the logs from the control plane?
-// Maybe subcommands for each component? minikube logs apiserver?
-func (k *KubeadmBootstrapper) GetClusterLogsTo(follow bool, out io.Writer) error {
-	var flags []string
-	if follow {
-		flags = append(flags, "-f")
+// LogCommands returns a map of log type to a command which will display that log.
+func (k *KubeadmBootstrapper) LogCommands(len int, follow bool) map[string]string {
+	var kcmd strings.Builder
+	kcmd.WriteString("journalctl -u kubelet")
+	if len > 0 {
+		kcmd.WriteString(fmt.Sprintf(" -n %d", len))
 	}
-	logsCommand := fmt.Sprintf("sudo journalctl %s -u kubelet", strings.Join(flags, " "))
-
 	if follow {
-		if err := k.c.CombinedOutputTo(logsCommand, out); err != nil {
-			return errors.Wrap(err, "getting cluster logs")
+		kcmd.WriteString(" -f")
 	}
-	} else {
-
-		logs, err := k.c.CombinedOutput(logsCommand)
-		if err != nil {
-			return errors.Wrap(err, "getting cluster logs")
-		}
-		fmt.Fprint(out, logs)
-	}
-	return nil
+	return map[string]string{"kubelet": kcmd.String()}
 }
 
 func (k *KubeadmBootstrapper) StartCluster(k8s config.KubernetesConfig) error {

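For reference, LogCommands builds a plain journalctl invocation. The snippet below is a minimal standalone sketch of the same string-building pattern; it is not part of this commit, and the sample length and follow values are arbitrary.

package main

import (
	"fmt"
	"strings"
)

// kubeletLogCmd mirrors the logic of KubeadmBootstrapper.LogCommands above.
func kubeletLogCmd(length int, follow bool) string {
	var kcmd strings.Builder
	kcmd.WriteString("journalctl -u kubelet")
	if length > 0 {
		kcmd.WriteString(fmt.Sprintf(" -n %d", length))
	}
	if follow {
		kcmd.WriteString(" -f")
	}
	return kcmd.String()
}

func main() {
	fmt.Println(kubeletLogCmd(200, false)) // journalctl -u kubelet -n 200
	fmt.Println(kubeletLogCmd(0, true))    // journalctl -u kubelet -f
}
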
@@ -49,6 +49,7 @@ var styles = map[string]style{
 	"launch": {Prefix: "🚀 "},
 	"thumbs-up": {Prefix: "👍 "},
 	"option": {Prefix: " ▪ "}, // Indented bullet
+	"bullet": {Prefix: " ▪ "}, // Indented bullet
 	"crushed": {Prefix: "💔 "},
 
 	// Specialized purpose styles

@@ -107,3 +107,8 @@ func (r *Containerd) KillContainers(ids []string) error {
 func (r *Containerd) StopContainers(ids []string) error {
 	return stopCRIContainers(r.Runner, ids)
 }
+
+// ContainerLogCmd returns the command to retrieve the log for a container based on ID
+func (r *Containerd) ContainerLogCmd(id string, len int, follow bool) string {
+	return criContainerLogCmd(id, len, follow)
+}

@@ -60,3 +60,18 @@ image-endpoint: unix://{{.Socket}}
 	}
 	return cr.Run(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(cPath), b.String(), cPath))
 }
+
+// criContainerLogCmd returns the command to retrieve the log for a container based on ID
+func criContainerLogCmd(id string, len int, follow bool) string {
+	var cmd strings.Builder
+	cmd.WriteString("crictl logs ")
+	if len > 0 {
+		cmd.WriteString(fmt.Sprintf("--tail %d ", len))
+	}
+	if follow {
+		cmd.WriteString("--follow ")
+	}
+
+	cmd.WriteString(id)
+	return cmd.String()
+}

@@ -106,3 +106,8 @@ func (r *CRIO) KillContainers(ids []string) error {
 func (r *CRIO) StopContainers(ids []string) error {
 	return stopCRIContainers(r.Runner, ids)
 }
+
+// ContainerLogCmd returns the command to retrieve the log for a container based on ID
+func (r *CRIO) ContainerLogCmd(id string, len int, follow bool) string {
+	return criContainerLogCmd(id, len, follow)
+}

@@ -61,6 +61,8 @@ type Manager interface {
 	KillContainers([]string) error
 	// StopContainers stops containers based on ID
 	StopContainers([]string) error
+	// ContainerLogCmd returns the command to retrieve the log for a container based on ID
+	ContainerLogCmd(string, int, bool) string
 }
 
 // Config is runtime configuration

@@ -101,3 +101,18 @@ func (r *Docker) KillContainers(ids []string) error {
 func (r *Docker) StopContainers(ids []string) error {
 	return r.Runner.Run(fmt.Sprintf("docker stop %s", strings.Join(ids, " ")))
 }
+
+// ContainerLogCmd returns the command to retrieve the log for a container based on ID
+func (r *Docker) ContainerLogCmd(id string, len int, follow bool) string {
+	var cmd strings.Builder
+	cmd.WriteString("docker logs ")
+	if len > 0 {
+		cmd.WriteString(fmt.Sprintf("--tail %d ", len))
+	}
+	if follow {
+		cmd.WriteString("--follow ")
+	}
+
+	cmd.WriteString(id)
+	return cmd.String()
+}

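The Docker and CRI builders share the same tail/follow pattern and differ only in the base command. A minimal standalone sketch of that shared pattern follows; it is not part of this commit, and the container ID is a hypothetical example.

package main

import (
	"fmt"
	"strings"
)

// containerLogCmd mirrors criContainerLogCmd and Docker.ContainerLogCmd above;
// base is "crictl logs" or "docker logs".
func containerLogCmd(base, id string, length int, follow bool) string {
	var cmd strings.Builder
	cmd.WriteString(base + " ")
	if length > 0 {
		cmd.WriteString(fmt.Sprintf("--tail %d ", length))
	}
	if follow {
		cmd.WriteString("--follow ")
	}
	cmd.WriteString(id)
	return cmd.String()
}

func main() {
	fmt.Println(containerLogCmd("docker logs", "abc123", 50, false)) // docker logs --tail 50 abc123
	fmt.Println(containerLogCmd("crictl logs", "abc123", 0, true))   // crictl logs --follow abc123
}
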
@@ -0,0 +1,141 @@
+/*
+Copyright 2019 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// package logs are convenience methods for fetching logs from a minikube cluster
+package logs
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/docker/machine/libmachine/log"
+	"github.com/golang/glog"
+	"k8s.io/minikube/pkg/minikube/bootstrapper"
+	"k8s.io/minikube/pkg/minikube/console"
+	"k8s.io/minikube/pkg/minikube/cruntime"
+)
+
+// rootCauseRe is a regular expression that matches known failure root causes
+var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --`)
+
+// importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs.
+var importantPods = []string{
+	"k8s_kube-apiserver",
+	"k8s_coredns_coredns",
+	"k8s_kube-scheduler",
+}
+
+// Follow follows logs from multiple files in tail(1) format
+func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner bootstrapper.CommandRunner) error {
+	cs := []string{}
+	for _, v := range logCommands(r, bs, 0, true) {
+		cs = append(cs, v+" &")
+	}
+	cs = append(cs, "wait")
+	return runner.CombinedOutputTo(strings.Join(cs, " "), os.Stdout)
+}
+
+// IsProblem returns whether this line matches a known problem
+func IsProblem(line string) bool {
+	return rootCauseRe.MatchString(line)
+}
+
+// FindProblems finds possible root causes among the logs
+func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner bootstrapper.CommandRunner) map[string][]string {
+	pMap := map[string][]string{}
+	cmds := logCommands(r, bs, 200, false)
+	for name, cmd := range cmds {
+		log.Infof("Gathering logs for %s ...", name)
+		out, err := runner.CombinedOutput(cmds[name])
+		if err != nil {
+			glog.Warningf("failed %s: %s: %v", name, cmd, err)
+			continue
+		}
+		log.Infof("log length: %d", len(out))
+
+		problems := []string{}
+		for _, l := range strings.Split(out, "\n") {
+			if IsProblem(l) {
+				glog.Warningf("Found %s problem: %s", name, l)
+				problems = append(problems, l)
+			}
+		}
+		if len(problems) > 0 {
+			pMap[name] = problems
+		}
+	}
+	return pMap
+}
+
+// OutputProblems outputs discovered problems.
+func OutputProblems(problems map[string][]string, maxLines int) {
+	for name, lines := range problems {
+		console.OutStyle("failure", "Problem detected in %q", name)
+		if len(lines) > maxLines {
+			lines = lines[len(lines)-maxLines:]
+		}
+		for _, l := range lines {
+			console.OutStyle("bullet", l)
+		}
+	}
+}
+
+// Output displays logs from multiple sources in tail(1) format
+func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner bootstrapper.CommandRunner, lines int) error {
+	cmds := logCommands(r, bs, lines, false)
+	names := []string{}
+	for k := range cmds {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+
+	failed := []string{}
+	for _, name := range names {
+		console.OutLn("==> %s <==", name)
+		out, err := runner.CombinedOutput(cmds[name])
+		if err != nil {
+			glog.Errorf("failed: %v", err)
+			failed = append(failed, name)
+			continue
+		}
+		console.OutLn(out)
+	}
+	if len(failed) > 0 {
+		return fmt.Errorf("unable to fetch logs for: %s", strings.Join(failed, ", "))
+	}
+	return nil
+}
+
+// logCommands returns a list of commands that would be run to receive the anticipated logs
+func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, follow bool) map[string]string {
+	cmds := bs.LogCommands(length, follow)
+	for _, pod := range importantPods {
+		ids, err := r.ListContainers(pod)
+		if err != nil {
+			glog.Errorf("Failed to list containers for %q: %v", pod, err)
+			continue
+		}
+		if len(ids) == 0 {
+			glog.Errorf("No containers found matching %q", pod)
+			continue
+		}
+		cmds[pod] = r.ContainerLogCmd(ids[0], length, follow)
+	}
+	return cmds
+}

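Follow composes a single shell line that tails every log source at once: each per-source command is backgrounded with " &" and the line ends with "wait", so the combined command blocks until the tails exit. A minimal standalone sketch of that composition, using two hypothetical source commands in place of what logCommands would return:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Stand-ins for the commands logCommands() would collect from the
	// bootstrapper (LogCommands) and the container runtime (ContainerLogCmd).
	sources := []string{
		"journalctl -u kubelet -f",
		"docker logs --follow abc123",
	}

	cs := []string{}
	for _, v := range sources {
		cs = append(cs, v+" &") // run each tail in the background
	}
	cs = append(cs, "wait") // block until all background tails finish

	// Prints: journalctl -u kubelet -f & docker logs --follow abc123 & wait
	fmt.Println(strings.Join(cs, " "))
}
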
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"testing"
+)
+
+func TestIsProblem(t *testing.T) {
+	var tests = []struct {
+		name string
+		want bool
+		input string
+	}{
+		{"almost", false, "F2350 I would love to be an unknown flag, but I am not -- :( --"},
+		{"apiserver-required-flag #1962", true, "error: [service-account-issuer is a required flag when BoundServiceAccountTokenVolume is enabled, --service-account-signing-key-file and --service-account-issuer are required flags"},
+		{"kubelet-eviction #", true, "I0213 07:16:44.041623 2410 eviction_manager.go:187] eviction manager: pods kube-apiserver-minikube_kube-system(87f41e2e0629c3deb5c2239e08d8045d) evicted, waiting for pod to be cleaned up"},
+		{"kubelet-unknown-flag #3655", true, "F0212 14:55:46.443031 2693 server.go:148] unknown flag: --AllowedUnsafeSysctls"},
+		{"apiserver-auth-mode #2852", true, `{"log":"Error: unknown flag: --Authorization.Mode\n","stream":"stderr","time":"2018-06-17T22:16:35.134161966Z"}`},
+		{"apiserver-admmission #3524", true, "error: unknown flag: --GenericServerRunOptions.AdmissionControl"},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			got := IsProblem(tc.input)
+			if got != tc.want {
+				t.Fatalf("IsProblem(%s)=%v, want %v", tc.input, got, tc.want)
+			}
+		})
+	}
+}