diff --git a/.github/workflows/time-to-k8s.yml b/.github/workflows/time-to-k8s.yml new file mode 100644 index 0000000000..0b4103ac71 --- /dev/null +++ b/.github/workflows/time-to-k8s.yml @@ -0,0 +1,18 @@ +name: "time-to-k8s benchmark" +on: + release: + types: [released] +jobs: + benchmark: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Checkout submodules + run: git submodule update --init + - uses: actions/setup-go@v2 + with: + go-version: 1.16.5 + stable: true + - name: Benchmark + run: | + ./hack/benchmark/time-to-k8s/time-to-k8s.sh ${{ secrets.MINIKUBE_BOT_PAT }} diff --git a/.gitmodules b/.gitmodules index 0e99693233..d398a94cf9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "site/themes/docsy"] path = site/themes/docsy url = https://github.com/google/docsy.git -[submodule "hack/benchmark/time-to-k8s/time-to-k8s"] - path = hack/benchmark/time-to-k8s/time-to-k8s +[submodule "hack/benchmark/time-to-k8s/time-to-k8s-repo"] + path = hack/benchmark/time-to-k8s/time-to-k8s-repo url = https://github.com/tstromberg/time-to-k8s.git diff --git a/Makefile b/Makefile index ce1c5dd247..db9a23088e 100644 --- a/Makefile +++ b/Makefile @@ -40,7 +40,7 @@ KVM_GO_VERSION ?= $(GO_VERSION:.0=) INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1) BUILDROOT_BRANCH ?= 2020.02.12 -REGISTRY?=gcr.io/k8s-minikube +REGISTRY ?= gcr.io/k8s-minikube # Get git commit id COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true) @@ -705,6 +705,21 @@ KICBASE_IMAGE_GCR ?= $(REGISTRY)/kicbase:$(KIC_VERSION) KICBASE_IMAGE_HUB ?= kicbase/stable:$(KIC_VERSION) KICBASE_IMAGE_REGISTRIES ?= $(KICBASE_IMAGE_GCR) $(KICBASE_IMAGE_HUB) +.PHONY: local-kicbase +local-kicbase: deploy/kicbase/auto-pause ## Builds the kicbase image and tags it local/kicbase:latest and local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT) + docker build -f ./deploy/kicbase/Dockerfile -t local/kicbase:$(KIC_VERSION) --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(KICBASE_IMAGE_GCR) ./deploy/kicbase + docker tag local/kicbase:$(KIC_VERSION) local/kicbase:latest + docker tag local/kicbase:$(KIC_VERSION) local/kicbase:$(KIC_VERSION)-$(COMMIT_SHORT) + +SED = sed -i +ifeq ($(GOOS),darwin) + SED = sed -i '' +endif + +.PHONY: local-kicbase-debug +local-kicbase-debug: local-kicbase ## Builds a local kicbase image and switches source code to point to it + $(SED) 's|Version = .*|Version = \"$(KIC_VERSION)-$(COMMIT_SHORT)\"|;s|baseImageSHA = .*|baseImageSHA = \"\"|;s|gcrRepo = .*|gcrRepo = \"local/kicbase\"|;s|dockerhubRepo = .*|dockerhubRepo = \"local/kicbase\"|' pkg/drivers/kic/types.go + .PHONY: push-kic-base-image push-kic-base-image: deploy/kicbase/auto-pause docker-multi-arch-builder ## Push multi-arch local/kicbase:latest to all remote registries ifdef AUTOPUSH diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index b7c86852d3..ec657fe817 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -87,6 +87,24 @@ func (error DeletionError) Error() string { return error.Err.Error() } +var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error { + if err := killMountProcess(); err != nil { + out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err}) + } + + deleteHosts(api, cc) + + // In case DeleteHost didn't complete the job. 
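+	// Remove the profile and machine directories explicitly, then drop the
+	// stored cluster config and, finally, the kubeconfig context for this
+	// profile.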
+ deleteProfileDirectory(profileName) + deleteMachineDirectories(cc) + + if err := deleteConfig(profileName); err != nil { + return err + } + + return deleteContext(profileName) +} + func init() { deleteCmd.Flags().BoolVar(&deleteAll, "all", false, "Set flag to delete all profiles") deleteCmd.Flags().BoolVar(&purge, "purge", false, "Set this flag to delete the '.minikube' folder from your user directory.") @@ -282,23 +300,10 @@ func deleteProfile(ctx context.Context, profile *config.Profile) error { } } - if err := killMountProcess(); err != nil { - out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err}) - } - - deleteHosts(api, cc) - - // In case DeleteHost didn't complete the job. - deleteProfileDirectory(profile.Name) - deleteMachineDirectories(cc) - - if err := deleteConfig(profile.Name); err != nil { + if err := hostAndDirsDeleter(api, cc, profile.Name); err != nil { return err } - if err := deleteContext(profile.Name); err != nil { - return err - } out.Step(style.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name}) return nil } diff --git a/cmd/minikube/cmd/delete_test.go b/cmd/minikube/cmd/delete_test.go index 6c6a98939f..fff2ffabf8 100644 --- a/cmd/minikube/cmd/delete_test.go +++ b/cmd/minikube/cmd/delete_test.go @@ -17,15 +17,18 @@ limitations under the License. package cmd import ( + "fmt" "io/ioutil" "os" "path/filepath" "testing" + "github.com/docker/machine/libmachine" "github.com/google/go-cmp/cmp" "github.com/otiai10/copy" "github.com/spf13/viper" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/localpath" ) @@ -114,6 +117,7 @@ func TestDeleteProfile(t *testing.T) { t.Logf("load failure: %v", err) } + hostAndDirsDeleter = hostAndDirsDeleterMock errs := DeleteProfiles([]*config.Profile{profile}) if len(errs) > 0 { HandleDeletionErrors(errs) @@ -154,6 +158,17 @@ func TestDeleteProfile(t *testing.T) { } } +var hostAndDirsDeleterMock = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error { + return deleteContextTest() +} + +func deleteContextTest() error { + if err := cmdcfg.Unset(config.ProfileName); err != nil { + return DeletionError{Err: fmt.Errorf("unset minikube profile: %v", err), Errtype: Fatal} + } + return nil +} + func TestDeleteAllProfiles(t *testing.T) { td, err := ioutil.TempDir("", "all") if err != nil { @@ -207,6 +222,7 @@ func TestDeleteAllProfiles(t *testing.T) { } profiles := append(validProfiles, inValidProfiles...) + hostAndDirsDeleter = hostAndDirsDeleterMock errs := DeleteProfiles(profiles) if errs != nil { diff --git a/cmd/minikube/cmd/root.go b/cmd/minikube/cmd/root.go index cfcd7f0276..5b81138d82 100644 --- a/cmd/minikube/cmd/root.go +++ b/cmd/minikube/cmd/root.go @@ -97,7 +97,7 @@ func Execute() { if runtime.GOOS == "darwin" && detect.IsAmd64M1Emulation() { exit.Message(reason.WrongBinaryM1, "You are trying to run amd64 binary on M1 system. 
Please use darwin/arm64 binary instead (Download at {{.url}}.)", - out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "amd64")}) + out.V{"url": notify.DownloadURL(version.GetVersion(), "darwin", "arm64")}) } _, callingCmd := filepath.Split(os.Args[0]) diff --git a/hack/benchmark/time-to-k8s/time-to-k8s-repo b/hack/benchmark/time-to-k8s/time-to-k8s-repo new file mode 160000 index 0000000000..72506e9487 --- /dev/null +++ b/hack/benchmark/time-to-k8s/time-to-k8s-repo @@ -0,0 +1 @@ +Subproject commit 72506e948764aeeafc01e58e6bec0ea741c61ca0 diff --git a/hack/benchmark/time-to-k8s/time-to-k8s.sh b/hack/benchmark/time-to-k8s/time-to-k8s.sh index d999a6afc8..a16beea807 100755 --- a/hack/benchmark/time-to-k8s/time-to-k8s.sh +++ b/hack/benchmark/time-to-k8s/time-to-k8s.sh @@ -17,7 +17,7 @@ set -e install_kind() { - curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.0/kind-linux-amd64 + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.0/kind-linux-amd64 chmod +x ./kind sudo mv ./kind /usr/local } @@ -31,31 +31,58 @@ install_minikube() { sudo install ./out/minikube /usr/local/bin/minikube } +install_gh() { + export access_token="$1" + + # Make sure gh is installed and configured + ./hack/jenkins/installers/check_install_gh.sh +} + +config_git() { + git config user.name "minikube-bot" + git config user.email "minikube-bot@google.com" +} + +create_branch() { + git checkout -b addTimeToK8s"$1" +} + run_benchmark() { - ( cd ./hack/benchmark/time-to-k8s/time-to-k8s/ && + pwd + ( cd ./hack/benchmark/time-to-k8s/time-to-k8s-repo/ && git submodule update --init && go run . --config local-kubernetes.yaml --iterations 5 --output output.csv ) } generate_chart() { - go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png + go run ./hack/benchmark/time-to-k8s/chart.go --csv ./hack/benchmark/time-to-k8s/time-to-k8s-repo/output.csv --output ./site/static/images/benchmarks/timeToK8s/"$1".png } create_page() { printf -- "---\ntitle: \"%s Benchmark\"\nlinkTitle: \"%s Benchmark\"\nweight: 1\n---\n\n![time-to-k8s](/images/benchmarks/timeToK8s/%s.png)\n" "$1" "$1" "$1" > ./site/content/en/docs/benchmarks/timeToK8s/"$1".md } -commit_chart() { +commit_changes() { git add ./site/static/images/benchmarks/timeToK8s/"$1".png ./site/content/en/docs/benchmarks/timeToK8s/"$1".md - git commit -m 'update time-to-k8s chart' + git commit -m "add time-to-k8s benchmark for $1" +} + +create_pr() { + git remote add minikube-bot https://minikube-bot:"$2"@github.com/minikube-bot/minikube.git + git push -u minikube-bot addTimeToK8s"$1" + gh pr create --repo kubernetes/minikube --base master --title "Add time-to-k8s benchmark for $1" --body "Updating time-to-k8s benchmark as part of the release process" } install_kind install_k3d install_minikube -VERSION=$(minikube version --short) +install_gh "$1" +config_git +VERSION=$(minikube version --short) +create_branch "$VERSION" run_benchmark generate_chart "$VERSION" create_page "$VERSION" -commit_chart "$VERSION" +commit_changes "$VERSION" +create_pr "$VERSION" "$1" diff --git a/hack/jenkins/common.sh b/hack/jenkins/common.sh index 40bfad2e98..a8d8f4d5e4 100755 --- a/hack/jenkins/common.sh +++ b/hack/jenkins/common.sh @@ -419,7 +419,7 @@ fi touch "${HTML_OUT}" touch "${SUMMARY_OUT}" -gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details 
"${COMMIT}") || true +gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}:$(date +%Y-%m-%d)") || true fail_num=$(echo $gopogh_status | jq '.NumberOfFail') test_num=$(echo $gopogh_status | jq '.NumberOfTests') pessimistic_status="${fail_num} / ${test_num} failures" @@ -441,6 +441,11 @@ if [ -z "${EXTERNAL}" ]; then gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true echo ">> uploading ${SUMMARY_OUT}" gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true + if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then + ./test-flake-chart/upload_tests.sh "${SUMMARY_OUT}" + elif [[ "${JOB_NAME}" == "Docker_Linux" || "${JOB_NAME}" == "Docker_Linux_containerd" || "${JOB_NAME}" == "KVM_Linux" || "${JOB_NAME}" == "KVM_Linux_containerd" ]]; then + ./test-flake-chart/report_flakes.sh "${MINIKUBE_LOCATION}" "${SUMMARY_OUT}" "${JOB_NAME}" + fi else # Otherwise, put the results in a predictable spot so the upload job can find them REPORTS_PATH=test_reports diff --git a/hack/jenkins/test-flake-chart/collect_data_manual.sh b/hack/jenkins/test-flake-chart/collect_data_manual.sh new file mode 100755 index 0000000000..287a1a63d5 --- /dev/null +++ b/hack/jenkins/test-flake-chart/collect_data_manual.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Copyright 2021 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Collects all test data manually, processes it, and uploads to GCS. This will +# overwrite any existing data. This should only be done for a dryrun, new data +# should be handled exclusively through upload_tests.sh. +# Example usage: ./collect_data_manual.sh + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + +# 1) "cat" together all summary files. +# 2) Process all summary files. +# 3) Optimize the resulting data. +# 4) Store in GCS bucket. +gsutil cat gs://minikube-builds/logs/master/*/*_summary.json \ +| $DIR/process_data.sh \ +| $DIR/optimize_data.sh \ +| gsutil cp - gs://minikube-flake-rate/data.csv diff --git a/hack/jenkins/test-flake-chart/compute_flake_rate.go b/hack/jenkins/test-flake-chart/compute_flake_rate.go new file mode 100644 index 0000000000..0025df2fbe --- /dev/null +++ b/hack/jenkins/test-flake-chart/compute_flake_rate.go @@ -0,0 +1,264 @@ +/* +Copyright 2021 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package main
+
+import (
+	"bufio"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"runtime/debug"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	dataCsv   = flag.String("data-csv", "", "Source data to compute flake rates on")
+	dateRange = flag.Uint("date-range", 5, "Number of test dates to consider when computing flake rate")
+)
+
+func main() {
+	flag.Parse()
+
+	file, err := os.Open(*dataCsv)
+	if err != nil {
+		exit("Unable to read data CSV", err)
+	}
+
+	testEntries := readData(file)
+	splitEntries := splitData(testEntries)
+	filteredEntries := filterRecentEntries(splitEntries, *dateRange)
+	flakeRates := computeFlakeRates(filteredEntries)
+	averageDurations := computeAverageDurations(filteredEntries)
+	fmt.Println("Environment,Test,Flake Rate,Duration")
+	for environment, environmentSplit := range flakeRates {
+		for test, flakeRate := range environmentSplit {
+			duration := averageDurations[environment][test]
+			fmt.Printf("%s,%s,%.2f,%.3f\n", environment, test, flakeRate*100, duration)
+		}
+	}
+}
+
+// One entry of a test run.
+// Example: testEntry{
+//   name:        "TestFunctional/parallel/LogsCmd",
+//   environment: "Docker_Linux",
+//   date:        time.Now,
+//   status:      "Passed",
+//   duration:    0.1,
+// }
+type testEntry struct {
+	name        string
+	environment string
+	date        time.Time
+	status      string
+	duration    float32
+}
+
+// A map with keys of (environment, test_name) to values of slices of testEntry.
+type splitEntryMap map[string]map[string][]testEntry
+
+// Reads CSV `file` and parses each line into a single testEntry.
+func readData(file io.Reader) []testEntry {
+	testEntries := []testEntry{}
+
+	fileReader := bufio.NewReaderSize(file, 256)
+	previousLine := []string{"", "", "", "", "", ""}
+	firstLine := true
+	for {
+		lineBytes, _, err := fileReader.ReadLine()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			exit("Error reading data CSV", err)
+		}
+		line := string(lineBytes)
+		fields := strings.Split(line, ",")
+		if firstLine {
+			if len(fields) != 6 {
+				exit(fmt.Sprintf("Data CSV in incorrect format. Expected 6 columns, but got %d", len(fields)), fmt.Errorf("bad CSV format"))
+			}
+			firstLine = false
+		}
+		for i, field := range fields {
+			if field == "" {
+				fields[i] = previousLine[i]
+			}
+		}
+		if len(fields) != 6 {
+			fmt.Printf("Found line with wrong number of columns. Expected 6, but got %d - skipping\n", len(fields))
+			continue
+		}
+		previousLine = fields
+		if fields[4] == "Passed" || fields[4] == "Failed" {
+			date, err := time.Parse("2006-01-02", fields[1])
+			if err != nil {
+				fmt.Printf("Failed to parse date: %v\n", err)
+				continue
+			}
+			duration, err := strconv.ParseFloat(fields[5], 32)
+			if err != nil {
+				fmt.Printf("Failed to parse duration: %v\n", err)
+				continue
+			}
+			testEntries = append(testEntries, testEntry{
+				name:        fields[3],
+				environment: fields[2],
+				date:        date,
+				status:      fields[4],
+				duration:    float32(duration),
+			})
+		}
+	}
+	return testEntries
+}
+
+// Splits `testEntries` up into maps indexed first by environment and then by test.
+func splitData(testEntries []testEntry) splitEntryMap {
+	splitEntries := make(splitEntryMap)
+
+	for _, entry := range testEntries {
+		appendEntry(splitEntries, entry.environment, entry.name, entry)
+	}
+
+	return splitEntries
+}
+
+// Appends `entry` to `splitEntries` at the `environment` and `test`.
+func appendEntry(splitEntries splitEntryMap, environment, test string, entry testEntry) {
+	// Lookup the environment.
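+	// The comma-ok form distinguishes a missing key from a stored empty map.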
+	environmentSplit, ok := splitEntries[environment]
+	if !ok {
+		// If the environment map is missing, make a map for this environment and store it.
+		environmentSplit = make(map[string][]testEntry)
+		splitEntries[environment] = environmentSplit
+	}
+
+	// Lookup the test.
+	testSplit, ok := environmentSplit[test]
+	if !ok {
+		// If the test is missing, make a slice for this test.
+		testSplit = make([]testEntry, 0)
+		// The slice is not inserted, since it will be replaced anyway.
+	}
+	environmentSplit[test] = append(testSplit, entry)
+}
+
+// Filters `splitEntries` to include only the most recent `dateRange` dates.
+func filterRecentEntries(splitEntries splitEntryMap, dateRange uint) splitEntryMap {
+	filteredEntries := make(splitEntryMap)
+
+	for environment, environmentSplit := range splitEntries {
+		for test, testSplit := range environmentSplit {
+			dates := make([]time.Time, 0, len(testSplit))
+			for _, entry := range testSplit {
+				dates = append(dates, entry.date)
+			}
+			// Sort dates from future to past.
+			sort.Slice(dates, func(i, j int) bool {
+				return dates[j].Before(dates[i])
+			})
+			datesInRange := make([]time.Time, 0, dateRange)
+			var lastDate time.Time
+			// Go through each date.
+			for _, date := range dates {
+				// If date is the same as last date, ignore it.
+				if date.Equal(lastDate) {
+					continue
+				}
+
+				// Add the date.
+				datesInRange = append(datesInRange, date)
+				lastDate = date
+				// If the dateRange has been hit, break out.
+				if uint(len(datesInRange)) == dateRange {
+					break
+				}
+			}
+
+			for _, entry := range testSplit {
+				// Look for the first element <= entry.date
+				index := sort.Search(len(datesInRange), func(i int) bool {
+					return !datesInRange[i].After(entry.date)
+				})
+				// If no date is <= entry.date, or the found date does not equal entry.date.
+				if index == len(datesInRange) || !datesInRange[index].Equal(entry.date) {
+					continue
+				}
+				appendEntry(filteredEntries, environment, test, entry)
+			}
+		}
+	}
+	return filteredEntries
+}
+
+// Computes the flake rates over each entry in `splitEntries`.
+func computeFlakeRates(splitEntries splitEntryMap) map[string]map[string]float32 {
+	flakeRates := make(map[string]map[string]float32)
+	for environment, environmentSplit := range splitEntries {
+		for test, testSplit := range environmentSplit {
+			failures := 0
+			for _, entry := range testSplit {
+				if entry.status == "Failed" {
+					failures++
+				}
+			}
+			setValue(flakeRates, environment, test, float32(failures)/float32(len(testSplit)))
+		}
+	}
+	return flakeRates
+}
+
+// Computes the average durations over each entry in `splitEntries`.
+func computeAverageDurations(splitEntries splitEntryMap) map[string]map[string]float32 {
+	averageDurations := make(map[string]map[string]float32)
+	for environment, environmentSplit := range splitEntries {
+		for test, testSplit := range environmentSplit {
+			durationSum := float32(0)
+			for _, entry := range testSplit {
+				durationSum += entry.duration
+			}
+			if len(testSplit) != 0 {
+				durationSum /= float32(len(testSplit))
+			}
+			setValue(averageDurations, environment, test, durationSum)
+		}
+	}
+	return averageDurations
+}
+
+// Sets the `value` of keys `environment` and `test` in `mapEntries`.
+func setValue(mapEntries map[string]map[string]float32, environment, test string, value float32) {
+	// Lookup the environment.
+	environmentRates, ok := mapEntries[environment]
+	if !ok {
+		// If the environment map is missing, make a map for this environment and store it.
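+		// (Same lazy initialization as in appendEntry above.)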
+		environmentRates = make(map[string]float32)
+		mapEntries[environment] = environmentRates
+	}
+	environmentRates[test] = value
+}
+
+// exit prints the error with a stack trace, then terminates with a non-zero code.
+func exit(msg string, err error) {
+	fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
+	os.Exit(60)
+}
diff --git a/hack/jenkins/test-flake-chart/compute_flake_rate_test.go b/hack/jenkins/test-flake-chart/compute_flake_rate_test.go
new file mode 100644
index 0000000000..d4013c0885
--- /dev/null
+++ b/hack/jenkins/test-flake-chart/compute_flake_rate_test.go
@@ -0,0 +1,492 @@
+/*
+Copyright 2021 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+)
+
+func simpleDate(year int, day int) time.Time {
+	return time.Date(year, time.January, day, 0, 0, 0, 0, time.UTC)
+}
+
+func compareEntrySlices(t *testing.T, actualData, expectedData []testEntry, extra string) {
+	if extra != "" {
+		extra = fmt.Sprintf(" (%s)", extra)
+	}
+	for i, actual := range actualData {
+		if len(expectedData) <= i {
+			t.Errorf("Received unmatched actual element at index %d%s. Actual: %v", i, extra, actual)
+			continue
+		}
+		expected := expectedData[i]
+		if actual != expected {
+			t.Errorf("Elements differ at index %d%s. Expected: %v, Actual: %v", i, extra, expected, actual)
+		}
+	}
+
+	if len(actualData) < len(expectedData) {
+		for i := len(actualData); i < len(expectedData); i++ {
+			t.Errorf("Missing unmatched expected element at index %d%s. 
Expected: %v", i, extra, expectedData[i]) + } + } +} + +func TestReadData(t *testing.T) { + actualData := readData(strings.NewReader( + `A,B,C,D,E,F + hash,2000-01-01,env1,test1,Passed,1 + hash,2001-01-01,env2,test2,Failed,0.5 + hash,,,test1,,0.6 + hash,2002-01-01,,,Passed,0.9 + hash,2003-01-01,env3,test3,Passed,2`, + )) + expectedData := []testEntry{ + { + name: "test1", + environment: "env1", + date: simpleDate(2000, 1), + status: "Passed", + duration: 1, + }, + { + name: "test2", + environment: "env2", + date: simpleDate(2001, 1), + status: "Failed", + duration: 0.5, + }, + { + name: "test1", + environment: "env2", + date: simpleDate(2001, 1), + status: "Failed", + duration: 0.6, + }, + { + name: "test1", + environment: "env2", + date: simpleDate(2002, 1), + status: "Passed", + duration: 0.9, + }, + { + name: "test3", + environment: "env3", + date: simpleDate(2003, 1), + status: "Passed", + duration: 2, + }, + } + + compareEntrySlices(t, actualData, expectedData, "") +} + +func compareSplitData(t *testing.T, actual, expected splitEntryMap) { + for environment, actualTests := range actual { + expectedTests, environmentOk := expected[environment] + if !environmentOk { + t.Errorf("Unexpected environment %s in actual", environment) + continue + } + + for test, actualEntries := range actualTests { + expectedEntries, testOk := expectedTests[test] + if !testOk { + t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment) + continue + } + + compareEntrySlices(t, actualEntries, expectedEntries, fmt.Sprintf("environment %s, test %s", environment, test)) + } + + for test := range expectedTests { + _, testOk := actualTests[test] + if !testOk { + t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment) + } + } + } + + for environment := range expected { + _, environmentOk := actual[environment] + if !environmentOk { + t.Errorf("Missing expected environment %s in actual", environment) + } + } +} + +func TestSplitData(t *testing.T) { + entryE1T1_1, entryE1T1_2 := testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 1), + status: "Passed", + }, testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 2), + status: "Passed", + } + entryE1T2 := testEntry{ + name: "test2", + environment: "env1", + date: simpleDate(2000, 1), + status: "Passed", + } + entryE2T1 := testEntry{ + name: "test1", + environment: "env2", + date: simpleDate(2000, 1), + status: "Passed", + } + entryE2T2 := testEntry{ + name: "test2", + environment: "env2", + date: simpleDate(2000, 1), + status: "Passed", + } + actual := splitData([]testEntry{entryE1T1_1, entryE1T1_2, entryE1T2, entryE2T1, entryE2T2}) + expected := splitEntryMap{ + "env1": { + "test1": {entryE1T1_1, entryE1T1_2}, + "test2": {entryE1T2}, + }, + "env2": { + "test1": {entryE2T1}, + "test2": {entryE2T2}, + }, + } + + compareSplitData(t, actual, expected) +} + +func TestFilterRecentEntries(t *testing.T) { + entryE1T1R1, entryE1T1R2, entryE1T1R3, entryE1T1O1, entryE1T1O2 := testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 4), + status: "Passed", + }, testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 3), + status: "Passed", + }, testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 3), + status: "Passed", + }, testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 2), + status: "Passed", + }, testEntry{ + name: "test1", + environment: "env1", + date: simpleDate(2000, 1), + status: 
"Passed", + } + entryE1T2R1, entryE1T2R2, entryE1T2O1 := testEntry{ + name: "test2", + environment: "env1", + date: simpleDate(2001, 3), + status: "Passed", + }, testEntry{ + name: "test2", + environment: "env1", + date: simpleDate(2001, 2), + status: "Passed", + }, testEntry{ + name: "test2", + environment: "env1", + date: simpleDate(2001, 1), + status: "Passed", + } + entryE2T2R1, entryE2T2R2, entryE2T2O1 := testEntry{ + name: "test2", + environment: "env2", + date: simpleDate(2003, 3), + status: "Passed", + }, testEntry{ + name: "test2", + environment: "env2", + date: simpleDate(2003, 2), + status: "Passed", + }, testEntry{ + name: "test2", + environment: "env2", + date: simpleDate(2003, 1), + status: "Passed", + } + + actualData := filterRecentEntries(splitEntryMap{ + "env1": { + "test1": { + entryE1T1R1, + entryE1T1R2, + entryE1T1R3, + entryE1T1O1, + entryE1T1O2, + }, + "test2": { + entryE1T2R1, + entryE1T2R2, + entryE1T2O1, + }, + }, + "env2": { + "test2": { + entryE2T2R1, + entryE2T2R2, + entryE2T2O1, + }, + }, + }, 2) + + expectedData := splitEntryMap{ + "env1": { + "test1": { + entryE1T1R1, + entryE1T1R2, + entryE1T1R3, + }, + "test2": { + entryE1T2R1, + entryE1T2R2, + }, + }, + "env2": { + "test2": { + entryE2T2R1, + entryE2T2R2, + }, + }, + } + + compareSplitData(t, actualData, expectedData) +} + +func compareValues(t *testing.T, actualValues, expectedValues map[string]map[string]float32) { + for environment, actualTests := range actualValues { + expectedTests, environmentOk := expectedValues[environment] + if !environmentOk { + t.Errorf("Unexpected environment %s in actual", environment) + continue + } + + for test, actualValue := range actualTests { + expectedValue, testOk := expectedTests[test] + if !testOk { + t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment) + continue + } + + if actualValue != expectedValue { + t.Errorf("Wrong value at environment %s and test %s. 
Expected: %v, Actual: %v", environment, test, expectedValue, actualValue) + } + } + + for test := range expectedTests { + _, testOk := actualTests[test] + if !testOk { + t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment) + } + } + } + + for environment := range expectedValues { + _, environmentOk := actualValues[environment] + if !environmentOk { + t.Errorf("Missing expected environment %s in actual", environment) + } + } +} + +func TestComputeFlakeRates(t *testing.T) { + actualData := computeFlakeRates(splitEntryMap{ + "env1": { + "test1": { + { + name: "test1", + environment: "env1", + date: simpleDate(2000, 4), + status: "Passed", + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 3), + status: "Passed", + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 3), + status: "Passed", + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 2), + status: "Passed", + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 1), + status: "Failed", + }, + }, + "test2": { + { + name: "test2", + environment: "env1", + date: simpleDate(2001, 3), + status: "Failed", + }, { + name: "test2", + environment: "env1", + date: simpleDate(2001, 2), + status: "Failed", + }, { + name: "test2", + environment: "env1", + date: simpleDate(2001, 1), + status: "Failed", + }, + }, + }, + "env2": { + "test2": { + { + name: "test2", + environment: "env2", + date: simpleDate(2003, 3), + status: "Passed", + }, testEntry{ + name: "test2", + environment: "env2", + date: simpleDate(2003, 2), + status: "Failed", + }, + }, + }, + }) + + expectedData := map[string]map[string]float32{ + "env1": { + "test1": 0.2, + "test2": 1, + }, + "env2": { + "test2": 0.5, + }, + } + + compareValues(t, actualData, expectedData) +} + +func TestComputeAverageDurations(t *testing.T) { + actualData := computeAverageDurations(splitEntryMap{ + "env1": { + "test1": { + { + name: "test1", + environment: "env1", + date: simpleDate(2000, 4), + status: "Passed", + duration: 1, + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 3), + status: "Passed", + duration: 2, + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 3), + status: "Passed", + duration: 3, + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 2), + status: "Passed", + duration: 3, + }, { + name: "test1", + environment: "env1", + date: simpleDate(2000, 1), + status: "Failed", + duration: 3, + }, + }, + "test2": { + { + name: "test2", + environment: "env1", + date: simpleDate(2001, 3), + status: "Failed", + duration: 1, + }, { + name: "test2", + environment: "env1", + date: simpleDate(2001, 2), + status: "Failed", + duration: 3, + }, { + name: "test2", + environment: "env1", + date: simpleDate(2001, 1), + status: "Failed", + duration: 3, + }, + }, + }, + "env2": { + "test2": { + { + name: "test2", + environment: "env2", + date: simpleDate(2003, 3), + status: "Passed", + duration: 0.5, + }, testEntry{ + name: "test2", + environment: "env2", + date: simpleDate(2003, 2), + status: "Failed", + duration: 1.5, + }, + }, + }, + }) + + expectedData := map[string]map[string]float32{ + "env1": { + "test1": float32(12) / float32(5), + "test2": float32(7) / float32(3), + }, + "env2": { + "test2": 1, + }, + } + + compareValues(t, actualData, expectedData) +} diff --git a/hack/jenkins/test-flake-chart/flake_chart.html b/hack/jenkins/test-flake-chart/flake_chart.html new file mode 100644 index 0000000000..beaf224c20 --- /dev/null +++ 
b/hack/jenkins/test-flake-chart/flake_chart.html @@ -0,0 +1,9 @@ + +
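+<!-- The nine added lines of markup were stripped during text extraction;
+     only the bare "+" markers remain. A minimal page consistent with
+     flake_chart.js (it loads Google Charts and fetches data.csv) might look
+     like the sketch below; the div id and the init() hook are assumptions,
+     not the original markup:
+       <html>
+         <head>
+           <script src="https://www.gstatic.com/charts/loader.js"></script>
+           <script src="flake_chart.js"></script>
+         </head>
+         <body onload="init()">
+           <div id="chart_div"></div>
+         </body>
+       </html>
+-->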
+ + + + + + + \ No newline at end of file diff --git a/hack/jenkins/test-flake-chart/flake_chart.js b/hack/jenkins/test-flake-chart/flake_chart.js new file mode 100644 index 0000000000..736fc7cd7a --- /dev/null +++ b/hack/jenkins/test-flake-chart/flake_chart.js @@ -0,0 +1,194 @@ + +// Displays an error message to the UI. Any previous message will be erased. +function displayError(message) { + console.error(message); +} + +// Creates a generator that reads the response body one line at a time. +async function* bodyByLinesIterator(response) { + // TODO: Replace this with something that actually reads the body line by line + // (since the file can be big). + const lines = (await response.text()).split("\n"); + for (let line of lines) { + // Skip any empty lines (most likely at the end). + if (line !== "") { + yield line; + } + } +} + +// Determines whether `str` matches at least one value in `enumObject`. +function isValidEnumValue(enumObject, str) { + for (const enumKey in enumObject) { + if (enumObject[enumKey] === str) { + return true; + } + } + return false; +} + +// Enum for test status. +const testStatus = { + PASSED: "Passed", + FAILED: "Failed", + SKIPPED: "Skipped" +} + +async function loadTestData() { + const response = await fetch("data.csv"); + if (!response.ok) { + const responseText = await response.text(); + throw `Failed to fetch data from GCS bucket. Error: ${responseText}`; + } + + const lines = bodyByLinesIterator(response); + // Consume the header to ensure the data has the right number of fields. + const header = (await lines.next()).value; + if (header.split(",").length != 6) { + throw `Fetched CSV data contains wrong number of fields. Expected: 6. Actual Header: "${header}"`; + } + + const testData = []; + let lineData = ["", "", "", "", "", ""]; + for await (const line of lines) { + let splitLine = line.split(","); + if (splitLine.length != 6) { + console.warn(`Found line with wrong number of fields. Actual: ${splitLine.length} Expected: 6. Line: "${line}"`); + continue; + } + splitLine = splitLine.map((value, index) => value === "" ? lineData[index] : value); + lineData = splitLine; + if (!isValidEnumValue(testStatus, splitLine[4])) { + console.warn(`Invalid test status provided. Actual: ${splitLine[4]} Expected: One of ${Object.values(testStatus).join(", ")}`); + continue; + } + testData.push({ + commit: splitLine[0], + date: new Date(splitLine[1]), + environment: splitLine[2], + name: splitLine[3], + status: splitLine[4], + duration: Number(splitLine[5]), + }); + } + if (testData.length == 0) { + throw "Fetched CSV data is empty or poorly formatted."; + } + return testData; +} + +// Computes the average of an array of numbers. +Array.prototype.average = function () { + return this.length === 0 ? 0 : this.reduce((sum, value) => sum + value, 0) / this.length; +}; + +// Groups array elements by keys obtained through `keyGetter`. +Array.prototype.groupBy = function (keyGetter) { + return Array.from(this.reduce((mapCollection, element) => { + const key = keyGetter(element); + if (mapCollection.has(key)) { + mapCollection.get(key).push(element); + } else { + mapCollection.set(key, [element]); + } + return mapCollection; + }, new Map()).values()); +}; + +// Parse URL search `query` into [{key, value}]. +function parseUrlQuery(query) { + if (query[0] === '?') { + query = query.substring(1); + } + return Object.fromEntries((query === "" ? 
[] : query.split("&")).map(element => {
+		const keyValue = element.split("=");
+		return [unescape(keyValue[0]), unescape(keyValue[1])];
+	}));
+}
+
+async function init() {
+	google.charts.load('current', { 'packages': ['corechart'] });
+	let testData;
+	try {
+		// Wait for Google Charts to load, and for test data to load.
+		// Only store the test data (at index 1) into `testData`.
+		testData = (await Promise.all([
+			new Promise(resolve => google.charts.setOnLoadCallback(resolve)),
+			loadTestData()
+		]))[1];
+	} catch (err) {
+		displayError(err);
+		return;
+	}
+
+	const data = new google.visualization.DataTable();
+	data.addColumn('date', 'Date');
+	data.addColumn('number', 'Flake Percentage');
+	data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
+	data.addColumn('number', 'Duration');
+	data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
+
+	const query = parseUrlQuery(window.location.search);
+	const desiredTest = query.test || "", desiredEnvironment = query.env || "";
+
+	const groups = testData
+		// Filter to only contain unskipped runs of the requested test and requested environment.
+		.filter(test => test.name === desiredTest && test.environment === desiredEnvironment && test.status !== testStatus.SKIPPED)
+		.groupBy(test => test.date.getTime());
+
+	data.addRows(
+		groups
+			// Sort by run date, past to future.
+			.sort((a, b) => a[0].date - b[0].date)
+			// Map each group to all variables needed to format the rows.
+			.map(tests => ({
+				date: tests[0].date, // Get one of the dates from the tests (which will all be the same).
+				flakeRate: tests.map(test => test.status === testStatus.FAILED ? 100 : 0).average(), // Compute average of runs where FAILED counts as 100%.
+				duration: tests.map(test => test.duration).average(), // Compute average duration of runs.
+				commitHashes: tests.map(test => ({ // Take all hashes, statuses, and durations of tests in this group.
+					hash: test.commit,
+					status: test.status,
+					duration: test.duration
+				}))
+			}))
+			.map(groupData => [
+				groupData.date,
+				groupData.flakeRate,
+				`
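+
+// Usage note: the chart page is parameterized via the URL query, e.g.
+//   flake_chart.html?test=TestFunctional/parallel/LogsCmd&env=Docker_Linux
+// selects one test in one environment (see parseUrlQuery above). Both this
+// file and compute_flake_rate.go read the same six-column CSV, in the order
+//   commit,date,environment,test,status,duration
+// with dates in Go's 2006-01-02 layout; blank fields repeat the previous
+// row's value, which is why both parsers keep a previous-line buffer.
+// (The example query above is illustrative; any test/environment pair works.)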