Merge branch 'master' of github.com:kubernetes/minikube into gcp-auth-0.0.6

pull/11665/head
Sharif Elgamal 2021-06-17 11:43:37 -07:00
commit 1e0ce5c090
30 changed files with 1326 additions and 42 deletions

.github/workflows/time-to-k8s.yml

@@ -0,0 +1,18 @@
name: "time-to-k8s benchmark"
on:
release:
types: [released]
jobs:
benchmark:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Checkout submodules
run: git submodule update --init
- uses: actions/setup-go@v2
with:
go-version: 1.16.5
stable: true
- name: Benchmark
run: |
./hack/benchmark/time-to-k8s/time-to-k8s.sh ${{ secrets.MINIKUBE_BOT_PAT }}

.gitmodules

@@ -1,6 +1,6 @@
[submodule "site/themes/docsy"]
path = site/themes/docsy
url = https://github.com/google/docsy.git
[submodule "hack/benchmark/time-to-k8s/time-to-k8s"]
path = hack/benchmark/time-to-k8s/time-to-k8s
[submodule "hack/benchmark/time-to-k8s/time-to-k8s-repo"]
path = hack/benchmark/time-to-k8s/time-to-k8s-repo
url = https://github.com/tstromberg/time-to-k8s.git


@@ -40,7 +40,7 @@ KVM_GO_VERSION ?= $(GO_VERSION:.0=)
INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
BUILDROOT_BRANCH ?= 2020.02.12
REGISTRY?=gcr.io/k8s-minikube
REGISTRY ?= gcr.io/k8s-minikube
# Get git commit id
COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
@@ -705,6 +705,21 @@ KICBASE_IMAGE_GCR ?= $(REGISTRY)/kicbase:$(KIC_VERSION)
KICBASE_IMAGE_HUB ?= kicbase/stable:$(KIC_VERSION)
KICBASE_IMAGE_REGISTRIES ?= $(KICBASE_IMAGE_GCR) $(KICBASE_IMAGE_HUB)
.PHONY: local-kicbase
local-kicbase: deploy/kicbase/auto-pause ## Builds the kicbase image and tags it local/kicbase:latest and local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION) --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KICBASE_IMAGE_GCR) ./deploy/kicbase
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:latest
docker tag local/kicbase:$(KIC_VERSION) local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT)
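# BSD sed on macOS requires an explicit (empty) backup suffix after -i, which GNU sed does not.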
SED = sed -i
ifeq ($(GOOS),darwin)
SED = sed -i ''
endif
.PHONY: local-kicbase-debug
local-kicbase-debug: local-kicbase ## Builds a local kicbase image and switches source code to point to it
$(SED) 's|Version = .*|Version = \"$(KIC_VERSION)-$(COMMIT_SHORT)\"|;s|baseImageSHA = .*|baseImageSHA = \"\"|;s|gcrRepo = .*|gcrRepo = \"local/kicbase\"|;s|dockerhubRepo = .*|dockerhubRepo = \"local/kicbase\"|' pkg/drivers/kic/types.go
.PHONY: push-kic-base-image
push-kic-base-image: deploy/kicbase/auto-pause docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries
ifdef AUTOPUSH


@@ -87,6 +87,24 @@ func (error DeletionError) Error() string {
return error.Err.Error()
}
var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
if err := killMountProcess(); err != nil {
out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
}
deleteHosts(api, cc)
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profileName)
deleteMachineDirectories(cc)
if err := deleteConfig(profileName); err != nil {
return err
}
return deleteContext(profileName)
}
func init() {
deleteCmd.Flags().BoolVar(&deleteAll, "all", false, "Set flag to delete all profiles")
deleteCmd.Flags().BoolVar(&purge, "purge", false, "Set this flag to delete the '.minikube' folder from your user directory.")
@@ -282,23 +300,10 @@ func deleteProfile(ctx context.Context, profile *config.Profile) error {
}
}
if err := killMountProcess(); err != nil {
out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
}
deleteHosts(api, cc)
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)
deleteMachineDirectories(cc)
if err := deleteConfig(profile.Name); err != nil {
if err := hostAndDirsDeleter(api, cc, profile.Name); err != nil {
return err
}
if err := deleteContext(profile.Name); err != nil {
return err
}
out.Step(style.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
return nil
}
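
Moving the deletion steps into the package-level `hostAndDirsDeleter` function variable is what lets the test file below swap in `hostAndDirsDeleterMock` without touching real hosts or directories. A minimal, self-contained sketch of this function-variable pattern, with illustrative names rather than minikube's:

```go
package main

import "fmt"

// cleanup is a package-level function variable: production code assigns the
// real implementation, and a test can overwrite it with a stub.
var cleanup = func(profileName string) error {
	fmt.Println("deleting host and dirs for", profileName)
	return nil
}

func deleteProfileSketch(profileName string) error {
	// Callers go through the variable, so whatever it currently points at runs.
	return cleanup(profileName)
}

func main() {
	// In a test: cleanup = func(string) error { return nil }
	if err := deleteProfileSketch("minikube"); err != nil {
		fmt.Println(err)
	}
}
```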


@@ -17,15 +17,18 @@ limitations under the License.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/machine/libmachine"
"github.com/google/go-cmp/cmp"
"github.com/otiai10/copy"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/localpath"
)
@@ -114,6 +117,7 @@ func TestDeleteProfile(t *testing.T) {
t.Logf("load failure: %v", err)
}
hostAndDirsDeleter = hostAndDirsDeleterMock
errs := DeleteProfiles([]*config.Profile{profile})
if len(errs) > 0 {
HandleDeletionErrors(errs)
@@ -154,6 +158,17 @@ }
}
}
var hostAndDirsDeleterMock = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
return deleteContextTest()
}
func deleteContextTest() error {
if err := cmdcfg.Unset(config.ProfileName); err != nil {
return DeletionError{Err: fmt.Errorf("unset minikube profile: %v", err), Errtype: Fatal}
}
return nil
}
func TestDeleteAllProfiles(t *testing.T) {
td, err := ioutil.TempDir("", "all")
if err != nil {
@@ -207,6 +222,7 @@ func TestDeleteAllProfiles(t *testing.T) {
}
profiles := append(validProfiles, inValidProfiles...)
hostAndDirsDeleter = hostAndDirsDeleterMock
errs := DeleteProfiles(profiles)
if errs != nil {


@@ -97,7 +97,7 @@ func Execute() {
if runtime.GOOS == "darwin" && detect.IsAmd64M1Emulation() {
exit.Message(reason.WrongBinaryM1, "You are trying to run amd64 binary on M1 system. Please use darwin/arm64 binary instead (Download at {{.url}}.)",
out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "amd64")})
out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "arm64")})
}
_, callingCmd := filepath.Split(os.Args[0])

@@ -0,0 +1 @@
Subproject commit 72506e948764aeeafc01e58e6bec0ea741c61ca0


@@ -31,31 +31,58 @@ install_minikube() {
sudo install ./out/minikube /usr/local/bin/minikube
}
install_gh() {
export access_token="$1"
# Make sure gh is installed and configured
./hack/jenkins/installers/check_install_gh.sh
}
config_git() {
git config user.name "minikube-bot"
git config user.email "minikube-bot@google.com"
}
create_branch() {
git checkout -b addTimeToK8s"$1"
}
run_benchmark() {
( cd ./hack/benchmark/time-to-k8s/time-to-k8s/ &&
pwd
( cd ./hack/benchmark/time-to-k8s/time-to-k8s-repo/ &&
git submodule update --init &&
go run . --config local-kubernetes.yaml --iterations 5 --output output.csv )
}
generate_chart() {
go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png
go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png
}
create_page() {
printf -- "---\ntitle: \"%s Benchmark\"\nlinkTitle: \"%s Benchmark\"\nweight: 1\n---\n\n![time-to-k8s](/images/benchmarks/timeToK8s/%s.png)\n" "$1" "$1" "$1" > ./site/content/en/docs/benchmarks/timeToK8s/"$1".md
}
commit_chart() {
commit_changes() {
git add ./site/static/images/benchmarks/timeToK8s/"$1".png ./site/content/en/docs/benchmarks/timeToK8s/"$1".md
git commit -m 'update time-to-k8s chart'
git commit -m "add time-to-k8s benchmark for $1"
}
create_pr() {
git remote add minikube-bot https://minikube-bot:"$2"@github.com/minikube-bot/minikube.git
git push -u minikube-bot addTimeToK8s"$1"
gh pr create --repo kubernetes/minikube --base master --title "Add time-to-k8s benchmark for $1" --body "Updating time-to-k8s benchmark as part of the release process"
}
install_kind
install_k3d
install_minikube
VERSION=$(minikube version --short)
install_gh "$1"
config_git
VERSION=$(minikube version --short)
create_branch "$VERSION"
run_benchmark
generate_chart "$VERSION"
create_page "$VERSION"
commit_chart "$VERSION"
commit_changes "$VERSION"
create_pr "$VERSION" "$1"


@@ -419,7 +419,7 @@ fi
touch "${HTML_OUT}"
touch "${SUMMARY_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}:$(date +%Y-%m-%d)") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="${fail_num} / ${test_num} failures"
@@ -441,6 +441,11 @@ if [ -z "${EXTERNAL}" ]; then
gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
echo ">> uploading ${SUMMARY_OUT}"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
./test-flake-chart/upload_tests.sh "${SUMMARY_OUT}"
elif [[ "${JOB_NAME}" == "Docker_Linux" || "${JOB_NAME}" == "Docker_Linux_containerd" || "${JOB_NAME}" == "KVM_Linux" || "${JOB_NAME}" == "KVM_Linux_containerd" ]]; then
./test-flake-chart/report_flakes.sh "${MINIKUBE_LOCATION}" "${SUMMARY_OUT}" "${JOB_NAME}"
fi
else
# Otherwise, put the results in a predictable spot so the upload job can find them
REPORTS_PATH=test_reports


@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Collects all test data manually, processes it, and uploads it to GCS. This
# will overwrite any existing data. This should only be done for a dry run;
# new data should be handled exclusively through upload_tests.sh.
# Example usage: ./collect_data_manual.sh
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# 1) "cat" together all summary files.
# 2) Process all summary files.
# 3) Optimize the resulting data.
# 4) Store in GCS bucket.
gsutil cat gs://minikube-builds/logs/master/*/*_summary.json \
| $DIR/process_data.sh \
| $DIR/optimize_data.sh \
| gsutil cp - gs://minikube-flake-rate/data.csv


@@ -0,0 +1,264 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"runtime/debug"
"sort"
"strconv"
"strings"
"time"
)
var (
dataCsv = flag.String("data-csv", "", "Source data to compute flake rates on")
dateRange = flag.Uint("date-range", 5, "Number of test dates to consider when computing flake rate")
)
func main() {
flag.Parse()
file, err := os.Open(*dataCsv)
if err != nil {
exit("Unable to read data CSV", err)
}
testEntries := readData(file)
splitEntries := splitData(testEntries)
filteredEntries := filterRecentEntries(splitEntries, *dateRange)
flakeRates := computeFlakeRates(filteredEntries)
averageDurations := computeAverageDurations(filteredEntries)
fmt.Println("Environment,Test,Flake Rate,Duration")
for environment, environmentSplit := range flakeRates {
for test, flakeRate := range environmentSplit {
duration := averageDurations[environment][test]
fmt.Printf("%s,%s,%.2f,%.3f\n", environment, test, flakeRate*100, duration)
}
}
}
// One entry of a test run.
// Example: TestEntry {
// name: "TestFunctional/parallel/LogsCmd",
// environment: "Docker_Linux",
// date: time.Now,
// status: "Passed",
// duration: 0.1,
// }
type testEntry struct {
name string
environment string
date time.Time
status string
duration float32
}
// A map with keys of (environment, test_name) to values of slices of TestEntry.
type splitEntryMap map[string]map[string][]testEntry
// Reads CSV `file` and consumes each line to be a single TestEntry.
func readData(file io.Reader) []testEntry {
testEntries := []testEntry{}
fileReader := bufio.NewReaderSize(file, 256)
previousLine := []string{"", "", "", "", "", ""}
firstLine := true
for {
lineBytes, _, err := fileReader.ReadLine()
if err != nil {
if err == io.EOF {
break
}
exit("Error reading data CSV", err)
}
line := string(lineBytes)
fields := strings.Split(line, ",")
if firstLine {
if len(fields) != 6 {
exit(fmt.Sprintf("Data CSV in incorrect format. Expected 6 columns, but got %d", len(fields)), fmt.Errorf("bad CSV format"))
}
firstLine = false
}
for i, field := range fields {
if field == "" {
fields[i] = previousLine[i]
}
}
if len(fields) != 6 {
fmt.Printf("Found line with wrong number of columns. Expectd 6, but got %d - skipping\n", len(fields))
continue
}
previousLine = fields
if fields[4] == "Passed" || fields[4] == "Failed" {
date, err := time.Parse("2006-01-02", fields[1])
if err != nil {
fmt.Printf("Failed to parse date: %v\n", err)
continue
}
duration, err := strconv.ParseFloat(fields[5], 32)
if err != nil {
fmt.Printf("Failed to parse duration: %v\n", err)
continue
}
testEntries = append(testEntries, testEntry{
name: fields[3],
environment: fields[2],
date: date,
status: fields[4],
duration: float32(duration),
})
}
}
return testEntries
}
// Splits `testEntries` up into maps indexed first by environment and then by test.
func splitData(testEntries []testEntry) splitEntryMap {
splitEntries := make(splitEntryMap)
for _, entry := range testEntries {
appendEntry(splitEntries, entry.environment, entry.name, entry)
}
return splitEntries
}
// Appends `entry` to `splitEntries` at the `environment` and `test`.
func appendEntry(splitEntries splitEntryMap, environment, test string, entry testEntry) {
// Lookup the environment.
environmentSplit, ok := splitEntries[environment]
if !ok {
// If the environment map is missing, make a map for this environment and store it.
environmentSplit = make(map[string][]testEntry)
splitEntries[environment] = environmentSplit
}
// Lookup the test.
testSplit, ok := environmentSplit[test]
if !ok {
// If the test is missing, make a slice for this test.
testSplit = make([]testEntry, 0)
// The slice is not inserted, since it will be replaced anyway.
}
environmentSplit[test] = append(testSplit, entry)
}
// Filters `splitEntries` to include only the most recent `date_range` dates.
func filterRecentEntries(splitEntries splitEntryMap, dateRange uint) splitEntryMap {
filteredEntries := make(splitEntryMap)
for environment, environmentSplit := range splitEntries {
for test, testSplit := range environmentSplit {
dates := make([]time.Time, 0, len(testSplit))
for _, entry := range testSplit {
dates = append(dates, entry.date)
}
// Sort dates from future to past.
sort.Slice(dates, func(i, j int) bool {
return dates[j].Before(dates[i])
})
datesInRange := make([]time.Time, 0, dateRange)
var lastDate time.Time
// Go through each date.
for _, date := range dates {
// If date is the same as last date, ignore it.
if date.Equal(lastDate) {
continue
}
// Add the date.
datesInRange = append(datesInRange, date)
lastDate = date
// If the date_range has been hit, break out.
if uint(len(datesInRange)) == dateRange {
break
}
}
for _, entry := range testSplit {
// Look for the first element <= entry.date
index := sort.Search(len(datesInRange), func(i int) bool {
return !datesInRange[i].After(entry.date)
})
// If no date is <= entry.date, or the found date does not equal entry.date.
if index == len(datesInRange) || !datesInRange[index].Equal(entry.date) {
continue
}
appendEntry(filteredEntries, environment, test, entry)
}
}
}
return filteredEntries
}
// Computes the flake rates over each entry in `splitEntries`.
func computeFlakeRates(splitEntries splitEntryMap) map[string]map[string]float32 {
flakeRates := make(map[string]map[string]float32)
for environment, environmentSplit := range splitEntries {
for test, testSplit := range environmentSplit {
failures := 0
for _, entry := range testSplit {
if entry.status == "Failed" {
failures++
}
}
setValue(flakeRates, environment, test, float32(failures)/float32(len(testSplit)))
}
}
return flakeRates
}
// Computes the average durations over each entry in `splitEntries`.
func computeAverageDurations(splitEntries splitEntryMap) map[string]map[string]float32 {
averageDurations := make(map[string]map[string]float32)
for environment, environmentSplit := range splitEntries {
for test, testSplit := range environmentSplit {
durationSum := float32(0)
for _, entry := range testSplit {
durationSum += entry.duration
}
if len(testSplit) != 0 {
durationSum /= float32(len(testSplit))
}
setValue(averageDurations, environment, test, durationSum)
}
}
return averageDurations
}
// Sets the `value` of keys `environment` and `test` in `mapEntries`.
func setValue(mapEntries map[string]map[string]float32, environment, test string, value float32) {
// Lookup the environment.
environmentRates, ok := mapEntries[environment]
if !ok {
// If the environment map is missing, make a map for this environment and store it.
environmentRates = make(map[string]float32)
mapEntries[environment] = environmentRates
}
environmentRates[test] = value
}
// exit prints an error message with a stack trace, then exits.
func exit(msg string, err error) {
fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
os.Exit(60)
}
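
Note that the CSV header this tool prints (`Environment,Test,Flake Rate,Duration`) matches the four-column `flake_rates.csv` format that report_flakes.sh below downloads from the `minikube-flake-rate` bucket and filters with sed.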


@@ -0,0 +1,492 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"strings"
"testing"
"time"
)
func simpleDate(year int, day int) time.Time {
return time.Date(year, time.January, day, 0, 0, 0, 0, time.UTC)
}
func compareEntrySlices(t *testing.T, actualData, expectedData []testEntry, extra string) {
if extra != "" {
extra = fmt.Sprintf(" (%s)", extra)
}
for i, actual := range actualData {
if len(expectedData) <= i {
t.Errorf("Received unmatched actual element at index %d%s. Actual: %v", i, extra, actual)
continue
}
expected := expectedData[i]
if actual != expected {
t.Errorf("Elements differ at index %d%s. Expected: %v, Actual: %v", i, extra, expected, actual)
}
}
if len(actualData) < len(expectedData) {
for i := len(actualData); i < len(expectedData); i++ {
t.Errorf("Missing unmatched expected element at index %d%s. Expected: %v", i, extra, expectedData[i])
}
}
}
func TestReadData(t *testing.T) {
actualData := readData(strings.NewReader(
`A,B,C,D,E,F
hash,2000-01-01,env1,test1,Passed,1
hash,2001-01-01,env2,test2,Failed,0.5
hash,,,test1,,0.6
hash,2002-01-01,,,Passed,0.9
hash,2003-01-01,env3,test3,Passed,2`,
))
expectedData := []testEntry{
{
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
duration: 1,
},
{
name: "test2",
environment: "env2",
date: simpleDate(2001, 1),
status: "Failed",
duration: 0.5,
},
{
name: "test1",
environment: "env2",
date: simpleDate(2001, 1),
status: "Failed",
duration: 0.6,
},
{
name: "test1",
environment: "env2",
date: simpleDate(2002, 1),
status: "Passed",
duration: 0.9,
},
{
name: "test3",
environment: "env3",
date: simpleDate(2003, 1),
status: "Passed",
duration: 2,
},
}
compareEntrySlices(t, actualData, expectedData, "")
}
func compareSplitData(t *testing.T, actual, expected splitEntryMap) {
for environment, actualTests := range actual {
expectedTests, environmentOk := expected[environment]
if !environmentOk {
t.Errorf("Unexpected environment %s in actual", environment)
continue
}
for test, actualEntries := range actualTests {
expectedEntries, testOk := expectedTests[test]
if !testOk {
t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment)
continue
}
compareEntrySlices(t, actualEntries, expectedEntries, fmt.Sprintf("environment %s, test %s", environment, test))
}
for test := range expectedTests {
_, testOk := actualTests[test]
if !testOk {
t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment)
}
}
}
for environment := range expected {
_, environmentOk := actual[environment]
if !environmentOk {
t.Errorf("Missing expected environment %s in actual", environment)
}
}
}
func TestSplitData(t *testing.T) {
entryE1T1_1, entryE1T1_2 := testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
}
entryE1T2 := testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
}
entryE2T1 := testEntry{
name: "test1",
environment: "env2",
date: simpleDate(2000, 1),
status: "Passed",
}
entryE2T2 := testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2000, 1),
status: "Passed",
}
actual := splitData([]testEntry{entryE1T1_1, entryE1T1_2, entryE1T2, entryE2T1, entryE2T2})
expected := splitEntryMap{
"env1": {
"test1": {entryE1T1_1, entryE1T1_2},
"test2": {entryE1T2},
},
"env2": {
"test1": {entryE2T1},
"test2": {entryE2T2},
},
}
compareSplitData(t, actual, expected)
}
func TestFilterRecentEntries(t *testing.T) {
entryE1T1R1, entryE1T1R2, entryE1T1R3, entryE1T1O1, entryE1T1O2 := testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 4),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
}
entryE1T2R1, entryE1T2R2, entryE1T2O1 := testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2001, 3),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2001, 2),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2001, 1),
status: "Passed",
}
entryE2T2R1, entryE2T2R2, entryE2T2O1 := testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 3),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 2),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 1),
status: "Passed",
}
actualData := filterRecentEntries(splitEntryMap{
"env1": {
"test1": {
entryE1T1R1,
entryE1T1R2,
entryE1T1R3,
entryE1T1O1,
entryE1T1O2,
},
"test2": {
entryE1T2R1,
entryE1T2R2,
entryE1T2O1,
},
},
"env2": {
"test2": {
entryE2T2R1,
entryE2T2R2,
entryE2T2O1,
},
},
}, 2)
expectedData := splitEntryMap{
"env1": {
"test1": {
entryE1T1R1,
entryE1T1R2,
entryE1T1R3,
},
"test2": {
entryE1T2R1,
entryE1T2R2,
},
},
"env2": {
"test2": {
entryE2T2R1,
entryE2T2R2,
},
},
}
compareSplitData(t, actualData, expectedData)
}
func compareValues(t *testing.T, actualValues, expectedValues map[string]map[string]float32) {
for environment, actualTests := range actualValues {
expectedTests, environmentOk := expectedValues[environment]
if !environmentOk {
t.Errorf("Unexpected environment %s in actual", environment)
continue
}
for test, actualValue := range actualTests {
expectedValue, testOk := expectedTests[test]
if !testOk {
t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment)
continue
}
if actualValue != expectedValue {
t.Errorf("Wrong value at environment %s and test %s. Expected: %v, Actual: %v", environment, test, expectedValue, actualValue)
}
}
for test := range expectedTests {
_, testOk := actualTests[test]
if !testOk {
t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment)
}
}
}
for environment := range expectedValues {
_, environmentOk := actualValues[environment]
if !environmentOk {
t.Errorf("Missing expected environment %s in actual", environment)
}
}
}
func TestComputeFlakeRates(t *testing.T) {
actualData := computeFlakeRates(splitEntryMap{
"env1": {
"test1": {
{
name: "test1",
environment: "env1",
date: simpleDate(2000, 4),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Failed",
},
},
"test2": {
{
name: "test2",
environment: "env1",
date: simpleDate(2001, 3),
status: "Failed",
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 2),
status: "Failed",
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 1),
status: "Failed",
},
},
},
"env2": {
"test2": {
{
name: "test2",
environment: "env2",
date: simpleDate(2003, 3),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 2),
status: "Failed",
},
},
},
})
expectedData := map[string]map[string]float32{
"env1": {
"test1": 0.2,
"test2": 1,
},
"env2": {
"test2": 0.5,
},
}
compareValues(t, actualData, expectedData)
}
func TestComputeAverageDurations(t *testing.T) {
actualData := computeAverageDurations(splitEntryMap{
"env1": {
"test1": {
{
name: "test1",
environment: "env1",
date: simpleDate(2000, 4),
status: "Passed",
duration: 1,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
duration: 2,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
duration: 3,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
duration: 3,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Failed",
duration: 3,
},
},
"test2": {
{
name: "test2",
environment: "env1",
date: simpleDate(2001, 3),
status: "Failed",
duration: 1,
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 2),
status: "Failed",
duration: 3,
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 1),
status: "Failed",
duration: 3,
},
},
},
"env2": {
"test2": {
{
name: "test2",
environment: "env2",
date: simpleDate(2003, 3),
status: "Passed",
duration: 0.5,
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 2),
status: "Failed",
duration: 1.5,
},
},
},
})
expectedData := map[string]map[string]float32{
"env1": {
"test1": float32(12) / float32(5),
"test2": float32(7) / float32(3),
},
"env2": {
"test2": 1,
},
}
compareValues(t, actualData, expectedData)
}


@@ -0,0 +1,9 @@
<html>
<head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
</head>
<body>
<div id="chart_div"></div>
</body>
<script src="flake_chart.js"></script>
</html>


@@ -0,0 +1,194 @@
// Displays an error message to the UI. Any previous message will be erased.
function displayError(message) {
console.error(message);
}
// Creates a generator that reads the response body one line at a time.
async function* bodyByLinesIterator(response) {
// TODO: Replace this with something that actually reads the body line by line
// (since the file can be big).
const lines = (await response.text()).split("\n");
for (let line of lines) {
// Skip any empty lines (most likely at the end).
if (line !== "") {
yield line;
}
}
}
// Determines whether `str` matches at least one value in `enumObject`.
function isValidEnumValue(enumObject, str) {
for (const enumKey in enumObject) {
if (enumObject[enumKey] === str) {
return true;
}
}
return false;
}
// Enum for test status.
const testStatus = {
PASSED: "Passed",
FAILED: "Failed",
SKIPPED: "Skipped"
}
async function loadTestData() {
const response = await fetch("data.csv");
if (!response.ok) {
const responseText = await response.text();
throw `Failed to fetch data from GCS bucket. Error: ${responseText}`;
}
const lines = bodyByLinesIterator(response);
// Consume the header to ensure the data has the right number of fields.
const header = (await lines.next()).value;
if (header.split(",").length != 6) {
throw `Fetched CSV data contains wrong number of fields. Expected: 6. Actual Header: "${header}"`;
}
const testData = [];
let lineData = ["", "", "", "", "", ""];
for await (const line of lines) {
let splitLine = line.split(",");
if (splitLine.length != 6) {
console.warn(`Found line with wrong number of fields. Actual: ${splitLine.length} Expected: 6. Line: "${line}"`);
continue;
}
splitLine = splitLine.map((value, index) => value === "" ? lineData[index] : value);
lineData = splitLine;
if (!isValidEnumValue(testStatus, splitLine[4])) {
console.warn(`Invalid test status provided. Actual: ${splitLine[4]} Expected: One of ${Object.values(testStatus).join(", ")}`);
continue;
}
testData.push({
commit: splitLine[0],
date: new Date(splitLine[1]),
environment: splitLine[2],
name: splitLine[3],
status: splitLine[4],
duration: Number(splitLine[5]),
});
}
if (testData.length == 0) {
throw "Fetched CSV data is empty or poorly formatted.";
}
return testData;
}
// Computes the average of an array of numbers.
Array.prototype.average = function () {
return this.length === 0 ? 0 : this.reduce((sum, value) => sum + value, 0) / this.length;
};
// Groups array elements by keys obtained through `keyGetter`.
Array.prototype.groupBy = function (keyGetter) {
return Array.from(this.reduce((mapCollection, element) => {
const key = keyGetter(element);
if (mapCollection.has(key)) {
mapCollection.get(key).push(element);
} else {
mapCollection.set(key, [element]);
}
return mapCollection;
}, new Map()).values());
};
// Parse URL search `query` into [{key, value}].
function parseUrlQuery(query) {
if (query[0] === '?') {
query = query.substring(1);
}
return Object.fromEntries((query === "" ? [] : query.split("&")).map(element => {
const keyValue = element.split("=");
return [unescape(keyValue[0]), unescape(keyValue[1])];
}));
}
async function init() {
google.charts.load('current', { 'packages': ['corechart'] });
let testData;
try {
// Wait for Google Charts to load, and for test data to load.
// Only store the test data (at index 1) into `testData`.
testData = (await Promise.all([
new Promise(resolve => google.charts.setOnLoadCallback(resolve)),
loadTestData()
]))[1];
} catch (err) {
displayError(err);
return;
}
const data = new google.visualization.DataTable();
data.addColumn('date', 'Date');
data.addColumn('number', 'Flake Percentage');
data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
data.addColumn('number', 'Duration');
data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
const query = parseUrlQuery(window.location.search);
const desiredTest = query.test || "", desiredEnvironment = query.env || "";
const groups = testData
// Filter to only contain unskipped runs of the requested test and requested environment.
.filter(test => test.name === desiredTest && test.environment === desiredEnvironment && test.status !== testStatus.SKIPPED)
.groupBy(test => test.date.getTime());
data.addRows(
groups
// Sort by run date, past to future.
.sort((a, b) => a[0].date - b[0].date)
// Map each group to all variables needed to format the rows.
.map(tests => ({
date: tests[0].date, // Get one of the dates from the tests (which will all be the same).
flakeRate: tests.map(test => test.status === testStatus.FAILED ? 100 : 0).average(), // Compute average of runs where FAILED counts as 100%.
duration: tests.map(test => test.duration).average(), // Compute average duration of runs.
commitHashes: tests.map(test => ({ // Take all hashes, statuses, and durations of tests in this group.
hash: test.commit,
status: test.status,
duration: test.duration
}))
}))
.map(groupData => [
groupData.date,
groupData.flakeRate,
`<div class="py-2 ps-2">
<b>${groupData.date.toString()}</b><br>
<b>Flake Percentage:</b> ${groupData.flakeRate.toFixed(2)}%<br>
<b>Hashes:</b><br>
${groupData.commitHashes.map(({ hash, status }) => ` - ${hash} (${status})`).join("<br>")}
</div>`,
groupData.duration,
`<div class="py-2 ps-2">
<b>${groupData.date.toString()}</b><br>
<b>Average Duration:</b> ${groupData.duration.toFixed(2)}s<br>
<b>Hashes:</b><br>
${groupData.commitHashes.map(({ hash, duration }) => ` - ${hash} (${duration}s)`).join("<br>")}
</div>`,
])
);
const options = {
title: `Flake rate and duration by day of ${desiredTest} on ${desiredEnvironment}`,
width: window.innerWidth,
height: window.innerHeight,
pointSize: 10,
pointShape: "circle",
series: {
0: { targetAxisIndex: 0 },
1: { targetAxisIndex: 1 },
},
vAxes: {
0: { title: "Flake rate", minValue: 0, maxValue: 100 },
1: { title: "Duration (seconds)" },
},
colors: ['#dc3912', '#3366cc'],
tooltip: { trigger: "selection", isHtml: true }
};
const chart = new google.visualization.LineChart(document.getElementById('chart_div'));
chart.draw(data, options);
}
init();


@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes a CSV file through stdin, compresses it and writes it to stdout.
# Example usage: < data.csv ./optimize_data.sh > data_optimized.csv
set -eu -o pipefail
# Take input CSV. For each field, if it is the same as the previous row, replace it with an empty string.
# This is to compress the input CSV. Example:
# Input:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# hash,2021-06-10,Docker_Linux_containerd,TestFunctional,Failed,0.6
#
# Output:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# ,,Docker_Linux_containerd,,Failed,0.6
awk -F, 'BEGIN {OFS = FS} { for(i=1; i<=NF; i++) { if($i == j[i]) { $i = ""; } else { j[i] = $i; } } printf "%s\n",$0 }'
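
Consumers of this compressed CSV undo the omission by carrying values forward from the previous row — `readData` in compute_flake_rate.go and `loadTestData` in flake_chart.js both do exactly this. A small standalone Go sketch of the inverse step, reusing the example rows above:

```go
package main

import (
	"fmt"
	"strings"
)

// decompress restores the fields that optimize_data.sh blanked out by
// carrying each value forward from the previous row.
func decompress(line string, previous []string) []string {
	fields := strings.Split(line, ",")
	for i, f := range fields {
		if f == "" && i < len(previous) {
			fields[i] = previous[i]
		}
	}
	return fields
}

func main() {
	prev := strings.Split("hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5", ",")
	fmt.Println(decompress(",,Docker_Linux_containerd,,Failed,0.6", prev))
	// [hash 2021-06-10 Docker_Linux_containerd TestFunctional Failed 0.6]
}
```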


@@ -0,0 +1,32 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes a series of gopogh summary jsons, and formats them into a CSV file with
# a row for each test.
# Example usage: cat gopogh_1.json gopogh_2.json gopogh_3.json | ./process_data.sh
set -eu -o pipefail
# Print header.
printf "Commit Hash,Test Date,Environment,Test,Status,Duration\n"
# Turn each test in each summary file to a CSV line containing its commit hash, date, environment, test, status, and duration.
# Example line:
# 247982745892,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
jq -r '((.PassedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: .Durations[$name], status: "Passed"}),
(.FailedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: .Durations[$name], status: "Failed"}),
(.SkippedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: 0, status: "Skipped"}))
| .commit + "," + .date + "," + .environment + "," + .test + "," + .status + "," + (.duration | tostring)'
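
The `Detail.Details` field split on ":" here is the `-details "${COMMIT}:$(date +%Y-%m-%d)"` value that common.sh now passes to gopogh. The same split expressed in Go, as a sketch (the helper name is illustrative, not part of these scripts):

```go
package main

import (
	"fmt"
	"strings"
)

// splitDetails pulls the commit hash and test date back out of the gopogh
// Details field, which is formatted as "<commit>:<YYYY-MM-DD>".
func splitDetails(details string) (commit, date string) {
	parts := strings.SplitN(details, ":", 2)
	if len(parts) < 2 {
		return parts[0], ""
	}
	return parts[0], parts[1]
}

func main() {
	commit, date := splitDetails("1e0ce5c090:2021-06-17")
	fmt.Println(commit, date) // 1e0ce5c090 2021-06-17
}
```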


@@ -0,0 +1,87 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creates a comment on the provided PR number, using the provided gopogh summary
# to list out the flake rates of all failing tests.
# Example usage: ./report_flakes.sh 11602 gopogh.json Docker_Linux
set -eu -o pipefail
if [ "$#" -ne 3 ]; then
echo "Wrong number of arguments. Usage: report_flakes.sh <PR number> <gopogh_summary.json> <environment>" 1>&2
exit 1
fi
PR_NUMBER=$1
SUMMARY_DATA=$2
ENVIRONMENT=$3
# To prevent having a super-long comment, add a maximum number of tests to report.
MAX_REPORTED_TESTS=30
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TMP_DATA=$(mktemp)
# 1) Process the data in the gopogh summary.
# 2) Filter tests to only include failed tests on the environment (and only get their names).
# 3) Sort the names of the tests.
# 4) Store in file $TMP_DATA.
< "$SUMMARY_DATA" $DIR/process_data.sh \
| sed -n -r -e "s/[0-9a-f]*,[0-9-]*,$ENVIRONMENT,([a-zA-Z\/_-]*),Failed,[.0-9]*/\1/p" \
| sort \
> "$TMP_DATA"
# Download the precomputed flake rates from the GCS bucket into file $TMP_FLAKE_RATES.
TMP_FLAKE_RATES=$(mktemp)
gsutil cp gs://minikube-flake-rate/flake_rates.csv "$TMP_FLAKE_RATES"
TMP_FAILED_RATES="${TMP_FLAKE_RATES}_filtered"
# 1) Parse/filter the flake rates to only include the test name and flake rates for environment.
# 2) Sort the flake rates based on test name.
# 3) Join the flake rates with the failing tests to only get flake rates of failing tests.
# 4) Sort failed test flake rates based on the flakiness of that test - stable tests should be first on the list.
# 5) Store in file $TMP_FAILED_RATES.
< "$TMP_FLAKE_RATES" sed -n -r -e "s/$ENVIRONMENT,([a-zA-Z\/_-]*),([.0-9]*),[.0-9]*/\1,\2/p" \
| sort -t, -k1,1 \
| join -t , -j 1 "$TMP_DATA" - \
| sort -g -t, -k2,2 \
> "$TMP_FAILED_RATES"
FAILED_RATES_LINES=$(wc -l < "$TMP_FAILED_RATES")
if [[ "$FAILED_RATES_LINES" -gt 30 ]]; then
echo "No failed tests! Aborting without commenting..." 1>&2
exit 0
fi
# Create the comment template.
TMP_COMMENT=$(mktemp)
printf "These are the flake rates of all failed tests on %s.\n|Failed Tests|Flake Rate (%%)|\n|---|---|\n" "$ENVIRONMENT" > "$TMP_COMMENT"
# 1) Get the first $MAX_REPORTED_TESTS lines.
# 2) Print a row in the table with the test name, flake rate, and a link to the flake chart for that test.
# 3) Append these rows to file $TMP_COMMENT.
< "$TMP_FAILED_RATES" head -n $MAX_REPORTED_TESTS \
| sed -n -r -e "s/([a-zA-Z\/_-]*),([.0-9]*)/|\1|\2 ([chart](https:\/\/storage.googleapis.com\/minikube-flake-rate\/flake_chart.html?env=$ENVIRONMENT\&test=\1))|/p" \
>> "$TMP_COMMENT"
# If there are too many failing tests, add an extra row explaining this, and a message after the table.
if [[ "$FAILED_RATES_LINES" -gt 30 ]]; then
printf "|More tests...|Continued...|\n\nToo many tests failed - See test logs for more details." >> "$TMP_COMMENT"
fi
# install gh if not present
$DIR/../installers/check_install_gh.sh
gh pr comment "https://github.com/kubernetes/minikube/pull/$PR_NUMBER" --body "$(cat $TMP_COMMENT)"
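
The sort/join pipeline above is effectively a keyed lookup: attach each failed test to its precomputed flake rate, then order ascending by flakiness so the normally-stable tests (whose failures are most likely real regressions) top the table. The same idea in a minimal Go sketch, with made-up test names and rates rather than real data:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Precomputed flake rates keyed by test name (illustrative values,
	// standing in for flake_rates.csv).
	flakeRates := map[string]float64{
		"TestFunctional/parallel/LogsCmd": 3.50,
		"TestSomethingFlaky":              80.00,
	}
	// Tests that failed in this run (illustrative).
	failed := []string{"TestSomethingFlaky", "TestFunctional/parallel/LogsCmd"}

	// Order from least to most flaky, mirroring `sort -g -t, -k2,2`.
	sort.Slice(failed, func(i, j int) bool {
		return flakeRates[failed[i]] < flakeRates[failed[j]]
	})
	for _, test := range failed {
		fmt.Printf("|%s|%.2f|\n", test, flakeRates[test])
	}
}
```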


@@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes a gopogh summary, extracts test data as a CSV and appends to the
# existing CSV data in the GCS bucket.
# Example usage: ./jenkins_upload_tests.sh gopogh_summary.json
set -eu -o pipefail
if [ "$#" -ne 1 ]; then
echo "Wrong number of arguments. Usage: jenkins_upload_tests.sh <gopogh_summary.json>" 1>&2
exit 1
fi
TMP_DATA=$(mktemp)
# Use the gopogh summary, process it, optimize the data, remove the header, and store.
<"$1" ./test-flake-chart/process_data.sh \
| ./test-flake-chart/optimize_data.sh \
| sed "1d" > $TMP_DATA
GCS_TMP="gs://minikube-flake-rate/$(basename "$TMP_DATA")"
# Copy data to append to GCS
gsutil cp $TMP_DATA $GCS_TMP
# Append data to existing data.
gsutil compose gs://minikube-flake-rate/data.csv $GCS_TMP gs://minikube-flake-rate/data.csv
# Clear all the temp stuff.
rm $TMP_DATA
gsutil rm $GCS_TMP


@@ -47,3 +47,7 @@ gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
SUMMARY_OUT="$ARTIFACTS/summary.txt"
echo ">> uploading ${SUMMARY_OUT}"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
./test-flake-chart/jenkins_upload_tests.sh "${SUMMARY_OUT}"
fi


@@ -232,7 +232,7 @@ func apiServerHealthzNow(hostname string, port int) (state.State, error) {
Proxy: nil, // Avoid using a proxy to speak to a local host
TLSClientConfig: &tls.Config{RootCAs: pool},
}
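// Bound the request with a timeout so a hung apiserver cannot stall the health check indefinitely.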
client := &http.Client{Transport: tr}
client := &http.Client{Transport: tr, Timeout: 5 * time.Second}
resp, err := client.Get(url)
// Connection refused, usually.
if err != nil {


@@ -92,3 +92,16 @@ Yes! If you prefer not having emoji in your minikube output 😔 , just set the
MINIKUBE_IN_STYLE=0 minikube start
```
## How can I access a minikube cluster from a remote network?
minikube's primary goal is to quickly set up local Kubernetes clusters, and therefore we strongly discourage using minikube in production or for listening to remote traffic. By design, minikube is meant to only listen on the local network.
However, it is possible to configure minikube to listen on a remote network. Doing so opens your cluster to the outside world and is not recommended; avoid it unless you fully understand the security implications.
For the docker and podman drivers, use the `--listen-address` flag:
```
minikube start --listen-address=0.0.0.0
```


@@ -16,7 +16,7 @@ Start a cluster by running:
minikube start
```
Access the Kubernetes Dashboard running within the minikube cluster:
Access the Kubernetes dashboard running within the minikube cluster:
```shell
minikube dashboard


@@ -625,7 +625,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Der Name des virtuellen Hyperv-Switch. Standardmäßig zuerst gefunden. (nur Hyperv-Treiber)",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "Die von der minikube-VM verwendete Kubernetes-Version (Beispiel: v1.2.3)",


@@ -630,7 +630,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "El nombre del conmutador virtual de hyperv. El valor predeterminado será el primer nombre que se encuentre (solo con el controlador de hyperv).",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "La versión de Kubernetes que utilizará la VM de minikube (p. ej.: versión 1.2.3)",


@@ -628,7 +628,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "Nom du commutateur virtuel hyperv. La valeur par défaut affiche le premier commutateur trouvé (pilote hyperv uniquement).",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "Version de Kubernetes qu'utilisera la VM minikube (exemple : v1.2.3).",


@@ -624,7 +624,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 仮想スイッチ名。最初に見つかったものにデフォルト設定されますhyperv ドライバのみ)",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "minikube VM で使用される Kubernetes バージョン(例: v1.2.3",


@@ -33,8 +33,7 @@
"A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "",
"A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "",
"A set of key=value pairs that describe feature gates for alpha/experimental features.": "",
"Access the Kubernetes dashboard running within the minikube cluster": "",
"Access the kubernetes dashboard running within the minikube cluster": "minikube 클러스터 내의 쿠버네티스 대시보드에 접근합니다",
"Access the Kubernetes dashboard running within the minikube cluster": "minikube 클러스터 내의 쿠버네티스 대시보드에 접근합니다",
"Access to ports below 1024 may fail on Windows with OpenSSH clients older than v8.1. For more information, see: https://minikube.sigs.k8s.io/docs/handbook/accessing/#access-to-ports-1024-on-windows-requires-root-permission": "",
"Add SSH identity key to SSH authentication agent": "SSH 인증 에이전트에 SSH ID 키 추가합니다",
"Add an image to local cache.": "로컬 캐시에 이미지를 추가합니다",
@@ -639,7 +638,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The machine-driver specified is failing to start. Try running 'docker-machine-driver-\u003ctype\u003e version'": "",


@@ -32,8 +32,7 @@
"A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "",
"A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "",
"A set of key=value pairs that describe feature gates for alpha/experimental features.": "",
"Access the Kubernetes dashboard running within the minikube cluster": "",
"Access the kubernetes dashboard running within the minikube cluster": "Dostęp do dashboardu uruchomionego w klastrze kubernetesa w minikube",
"Access the Kubernetes dashboard running within the minikube cluster": "Dostęp do dashboardu uruchomionego w klastrze kubernetesa w minikube",
"Access to ports below 1024 may fail on Windows with OpenSSH clients older than v8.1. For more information, see: https://minikube.sigs.k8s.io/docs/handbook/accessing/#access-to-ports-1024-on-windows-requires-root-permission": "",
"Add SSH identity key to SSH authentication agent": "",
"Add an image to local cache.": "Dodaj obraz do lokalnego cache",
@@ -643,7 +642,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "Wersja kubernetesa, która zostanie użyta przez wirtualną maszynę minikube (np. v1.2.3)",


@@ -585,7 +585,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The machine-driver specified is failing to start. Try running 'docker-machine-driver-\u003ctype\u003e version'": "",


@@ -39,8 +39,7 @@
"A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine": "一组在为 kubernetes 生成的证书中使用的 apiserver 名称。如果您希望将此 apiserver 设置为可从机器外部访问,则可以使用这组 apiserver 名称",
"A set of key=value pairs that describe configuration that may be passed to different components.\nThe key should be '.' separated, and the first part before the dot is the component to apply the configuration to.\nValid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler\nValid kubeadm parameters:": "一组用于描述可传递给不同组件的配置的键值对。\n其中键应以英文句点“.”分隔,英文句点前面的第一个部分是应用该配置的组件。\n有效组件包括kubelet、kubeadm、apiserver、controller-manager、etcd、proxy、scheduler\n有效 kubeadm 参数包括:",
"A set of key=value pairs that describe feature gates for alpha/experimental features.": "一组用于描述 alpha 版功能/实验性功能的功能限制的键值对。",
"Access the Kubernetes dashboard running within the minikube cluster": "",
"Access the kubernetes dashboard running within the minikube cluster": "访问在 minikube 集群中运行的 kubernetes dashboard",
"Access the Kubernetes dashboard running within the minikube cluster": "访问在 minikube 集群中运行的 kubernetes dashboard",
"Access to ports below 1024 may fail on Windows with OpenSSH clients older than v8.1. For more information, see: https://minikube.sigs.k8s.io/docs/handbook/accessing/#access-to-ports-1024-on-windows-requires-root-permission": "",
"Add SSH identity key to SSH authentication agent": "",
"Add an image to local cache.": "将 image 添加到本地缓存。",
@@ -732,7 +731,7 @@
"The heapster addon is depreciated. please try to disable metrics-server instead": "",
"The hyperv virtual switch name. Defaults to first found. (hyperv driver only)": "hyperv 虚拟交换机名称。默认为找到的第一个 hyperv 虚拟交换机。(仅限 hyperv 驱动程序)",
"The hypervisor does not appear to be configured properly. Run 'minikube start --alsologtostderr -v=1' and inspect the error code": "管理程序似乎配置的不正确。执行 'minikube start --alsologtostderr -v=1' 并且检查错误代码",
"The image you are trying to add {{.imageName}} doesn't exist!": "",
"The image '{{.imageName}}' was not found; unable to add it to cache.": "",
"The initial time interval for each check that wait performs in seconds": "",
"The kubeadm binary within the Docker container is not executable": "",
"The kubernetes version that the minikube VM will use (ex: v1.2.3)": "minikube 虚拟机将使用的 kubernetes 版本(例如 v1.2.3",