Merge pull request #11602 from andriyDev/Flakes

Create a system for generating public flake rate reporting.
Medya Ghazizadeh 2021-06-15 12:51:29 -04:00 committed by GitHub
commit 6a9076391d
11 changed files with 1192 additions and 1 deletion

View File

@@ -419,7 +419,7 @@ fi
touch "${HTML_OUT}"
touch "${SUMMARY_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
gopogh_status=$(gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}:$(date +%Y-%m-%d)") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="${fail_num} / ${test_num} failures"
@@ -441,6 +441,11 @@ if [ -z "${EXTERNAL}" ]; then
gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
echo ">> uploading ${SUMMARY_OUT}"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
./test-flake-chart/upload_tests.sh "${SUMMARY_OUT}"
elif [[ "${JOB_NAME}" == "Docker_Linux" || "${JOB_NAME}" == "Docker_Linux_containerd" || "${JOB_NAME}" == "KVM_Linux" || "${JOB_NAME}" == "KVM_Linux_containerd" ]]; then
./test-flake-chart/report_flakes.sh "${MINIKUBE_LOCATION}" "${SUMMARY_OUT}" "${JOB_NAME}"
fi
else
# Otherwise, put the results in a predictable spot so the upload job can find them
REPORTS_PATH=test_reports

View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Collects all test data manually, processes it, and uploads it to GCS. This
# will overwrite any existing data. This should only be done for a dry run;
# new data should be handled exclusively through upload_tests.sh.
# Example usage: ./collect_data_manual.sh
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# 1) "cat" together all summary files.
# 2) Process all summary files.
# 3) Optimize the resulting data.
# 4) Store in GCS bucket.
gsutil cat gs://minikube-builds/logs/master/*/*_summary.json \
| $DIR/process_data.sh \
| $DIR/optimize_data.sh \
| gsutil cp - gs://minikube-flake-rate/data.csv
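For reference, the data.csv stored in the bucket keeps the header from process_data.sh followed by optimized rows, where any field equal to the one in the previous row is left empty (values below are hypothetical):

Commit Hash,Test Date,Environment,Test,Status,Duration
6a9076391d,2021-06-15,Docker_Linux,TestFunctional,Passed,0.5
,,Docker_Linux_containerd,,Failed,0.6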

View File

@@ -0,0 +1,264 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"runtime/debug"
"sort"
"strconv"
"strings"
"time"
)
var (
dataCsv = flag.String("data-csv", "", "Source data to compute flake rates on")
dateRange = flag.Uint("date-range", 5, "Number of test dates to consider when computing flake rate")
)
func main() {
flag.Parse()
file, err := os.Open(*dataCsv)
if err != nil {
exit("Unable to read data CSV", err)
}
testEntries := readData(file)
splitEntries := splitData(testEntries)
filteredEntries := filterRecentEntries(splitEntries, *dateRange)
flakeRates := computeFlakeRates(filteredEntries)
averageDurations := computeAverageDurations(filteredEntries)
fmt.Println("Environment,Test,Flake Rate,Duration")
for environment, environmentSplit := range flakeRates {
for test, flakeRate := range environmentSplit {
duration := averageDurations[environment][test]
fmt.Printf("%s,%s,%.2f,%.3f\n", environment, test, flakeRate*100, duration)
}
}
}
// One entry of a test run.
// Example: TestEntry {
// name: "TestFunctional/parallel/LogsCmd",
// environment: "Docker_Linux",
// date: time.Now(),
// status: "Passed",
// duration: 0.1,
// }
type testEntry struct {
name string
environment string
date time.Time
status string
duration float32
}
// A map with keys of (environment, test name) to values of slices of testEntry.
type splitEntryMap map[string]map[string][]testEntry
// Reads CSV `file` and parses each line into a single testEntry.
func readData(file io.Reader) []testEntry {
testEntries := []testEntry{}
fileReader := bufio.NewReaderSize(file, 256)
previousLine := []string{"", "", "", "", "", ""}
firstLine := true
for {
lineBytes, _, err := fileReader.ReadLine()
if err != nil {
if err == io.EOF {
break
}
exit("Error reading data CSV", err)
}
line := string(lineBytes)
fields := strings.Split(line, ",")
if firstLine {
if len(fields) != 6 {
exit(fmt.Sprintf("Data CSV in incorrect format. Expected 6 columns, but got %d", len(fields)), fmt.Errorf("bad CSV format"))
}
firstLine = false
}
for i, field := range fields {
if field == "" {
fields[i] = previousLine[i]
}
}
if len(fields) != 6 {
fmt.Printf("Found line with wrong number of columns. Expectd 6, but got %d - skipping\n", len(fields))
continue
}
previousLine = fields
if fields[4] == "Passed" || fields[4] == "Failed" {
date, err := time.Parse("2006-01-02", fields[1])
if err != nil {
fmt.Printf("Failed to parse date: %v\n", err)
continue
}
duration, err := strconv.ParseFloat(fields[5], 32)
if err != nil {
fmt.Printf("Failed to parse duration: %v\n", err)
continue
}
testEntries = append(testEntries, testEntry{
name: fields[3],
environment: fields[2],
date: date,
status: fields[4],
duration: float32(duration),
})
}
}
return testEntries
}
// Splits `testEntries` up into maps indexed first by environment and then by test.
func splitData(testEntries []testEntry) splitEntryMap {
splitEntries := make(splitEntryMap)
for _, entry := range testEntries {
appendEntry(splitEntries, entry.environment, entry.name, entry)
}
return splitEntries
}
// Appends `entry` to `splitEntries` at the `environment` and `test`.
func appendEntry(splitEntries splitEntryMap, environment, test string, entry testEntry) {
// Lookup the environment.
environmentSplit, ok := splitEntries[environment]
if !ok {
// If the environment map is missing, make a map for this environment and store it.
environmentSplit = make(map[string][]testEntry)
splitEntries[environment] = environmentSplit
}
// Lookup the test.
testSplit, ok := environmentSplit[test]
if !ok {
// If the test is missing, make a slice for this test.
testSplit = make([]testEntry, 0)
// The slice is not inserted, since it will be replaced anyway.
}
environmentSplit[test] = append(testSplit, entry)
}
// Filters `splitEntries` to include only the most recent `dateRange` dates.
func filterRecentEntries(splitEntries splitEntryMap, dateRange uint) splitEntryMap {
filteredEntries := make(splitEntryMap)
for environment, environmentSplit := range splitEntries {
for test, testSplit := range environmentSplit {
dates := make([]time.Time, 0, len(testSplit))
for _, entry := range testSplit {
dates = append(dates, entry.date)
}
// Sort dates from future to past.
sort.Slice(dates, func(i, j int) bool {
return dates[j].Before(dates[i])
})
datesInRange := make([]time.Time, 0, dateRange)
var lastDate time.Time
// Go through each date.
for _, date := range dates {
// If date is the same as last date, ignore it.
if date.Equal(lastDate) {
continue
}
// Add the date.
datesInRange = append(datesInRange, date)
lastDate = date
// If the dateRange has been hit, break out.
if uint(len(datesInRange)) == dateRange {
break
}
}
for _, entry := range testSplit {
// Look for the first element <= entry.date
index := sort.Search(len(datesInRange), func(i int) bool {
return !datesInRange[i].After(entry.date)
})
// If no date is <= entry.date, or the found date does not equal entry.date.
if index == len(datesInRange) || !datesInRange[index].Equal(entry.date) {
continue
}
appendEntry(filteredEntries, environment, test, entry)
}
}
}
return filteredEntries
}
// Computes the flake rates over each entry in `splitEntries`.
func computeFlakeRates(splitEntries splitEntryMap) map[string]map[string]float32 {
flakeRates := make(map[string]map[string]float32)
for environment, environmentSplit := range splitEntries {
for test, testSplit := range environmentSplit {
failures := 0
for _, entry := range testSplit {
if entry.status == "Failed" {
failures++
}
}
setValue(flakeRates, environment, test, float32(failures)/float32(len(testSplit)))
}
}
return flakeRates
}
// Computes the average durations over each entry in `splitEntries`.
func computeAverageDurations(splitEntries splitEntryMap) map[string]map[string]float32 {
averageDurations := make(map[string]map[string]float32)
for environment, environmentSplit := range splitEntries {
for test, testSplit := range environmentSplit {
durationSum := float32(0)
for _, entry := range testSplit {
durationSum += entry.duration
}
if len(testSplit) != 0 {
durationSum /= float32(len(testSplit))
}
setValue(averageDurations, environment, test, durationSum)
}
}
return averageDurations
}
// Sets the `value` of keys `environment` and `test` in `mapEntries`.
func setValue(mapEntries map[string]map[string]float32, environment, test string, value float32) {
// Lookup the environment.
environmentRates, ok := mapEntries[environment]
if !ok {
// If the environment map is missing, make a map for this environment and store it.
environmentRates = make(map[string]float32)
mapEntries[environment] = environmentRates
}
environmentRates[test] = value
}
// exit prints the error with a stack trace and exits.
func exit(msg string, err error) {
fmt.Printf("WithError(%s)=%v called from:\n%s", msg, err, debug.Stack())
os.Exit(60)
}
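A sketch of how this tool might be invoked; the flag names and output format come from the source above, while the wiring to flake_rates.csv (the file report_flakes.sh later downloads from the bucket) is an assumption:

# Compute flake rates over the 5 most recent test dates and publish them.
go run compute_flake_rate.go --data-csv=data.csv --date-range=5 > flake_rates.csv
gsutil cp flake_rates.csv gs://minikube-flake-rate/flake_rates.csv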

View File

@@ -0,0 +1,492 @@
/*
Copyright 2021 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"strings"
"testing"
"time"
)
func simpleDate(year int, day int) time.Time {
return time.Date(year, time.January, day, 0, 0, 0, 0, time.UTC)
}
func compareEntrySlices(t *testing.T, actualData, expectedData []testEntry, extra string) {
if extra != "" {
extra = fmt.Sprintf(" (%s)", extra)
}
for i, actual := range actualData {
if len(expectedData) <= i {
t.Errorf("Received unmatched actual element at index %d%s. Actual: %v", i, extra, actual)
continue
}
expected := expectedData[i]
if actual != expected {
t.Errorf("Elements differ at index %d%s. Expected: %v, Actual: %v", i, extra, expected, actual)
}
}
if len(actualData) < len(expectedData) {
for i := len(actualData); i < len(expectedData); i++ {
t.Errorf("Missing unmatched expected element at index %d%s. Expected: %v", i, extra, expectedData[i])
}
}
}
func TestReadData(t *testing.T) {
actualData := readData(strings.NewReader(
`A,B,C,D,E,F
hash,2000-01-01,env1,test1,Passed,1
hash,2001-01-01,env2,test2,Failed,0.5
hash,,,test1,,0.6
hash,2002-01-01,,,Passed,0.9
hash,2003-01-01,env3,test3,Passed,2`,
))
expectedData := []testEntry{
{
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
duration: 1,
},
{
name: "test2",
environment: "env2",
date: simpleDate(2001, 1),
status: "Failed",
duration: 0.5,
},
{
name: "test1",
environment: "env2",
date: simpleDate(2001, 1),
status: "Failed",
duration: 0.6,
},
{
name: "test1",
environment: "env2",
date: simpleDate(2002, 1),
status: "Passed",
duration: 0.9,
},
{
name: "test3",
environment: "env3",
date: simpleDate(2003, 1),
status: "Passed",
duration: 2,
},
}
compareEntrySlices(t, actualData, expectedData, "")
}
func compareSplitData(t *testing.T, actual, expected splitEntryMap) {
for environment, actualTests := range actual {
expectedTests, environmentOk := expected[environment]
if !environmentOk {
t.Errorf("Unexpected environment %s in actual", environment)
continue
}
for test, actualEntries := range actualTests {
expectedEntries, testOk := expectedTests[test]
if !testOk {
t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment)
continue
}
compareEntrySlices(t, actualEntries, expectedEntries, fmt.Sprintf("environment %s, test %s", environment, test))
}
for test := range expectedTests {
_, testOk := actualTests[test]
if !testOk {
t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment)
}
}
}
for environment := range expected {
_, environmentOk := actual[environment]
if !environmentOk {
t.Errorf("Missing expected environment %s in actual", environment)
}
}
}
func TestSplitData(t *testing.T) {
entryE1T1_1, entryE1T1_2 := testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
}
entryE1T2 := testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
}
entryE2T1 := testEntry{
name: "test1",
environment: "env2",
date: simpleDate(2000, 1),
status: "Passed",
}
entryE2T2 := testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2000, 1),
status: "Passed",
}
actual := splitData([]testEntry{entryE1T1_1, entryE1T1_2, entryE1T2, entryE2T1, entryE2T2})
expected := splitEntryMap{
"env1": {
"test1": {entryE1T1_1, entryE1T1_2},
"test2": {entryE1T2},
},
"env2": {
"test1": {entryE2T1},
"test2": {entryE2T2},
},
}
compareSplitData(t, actual, expected)
}
func TestFilterRecentEntries(t *testing.T) {
entryE1T1R1, entryE1T1R2, entryE1T1R3, entryE1T1O1, entryE1T1O2 := testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 4),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
}, testEntry{
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Passed",
}
entryE1T2R1, entryE1T2R2, entryE1T2O1 := testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2001, 3),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2001, 2),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env1",
date: simpleDate(2001, 1),
status: "Passed",
}
entryE2T2R1, entryE2T2R2, entryE2T2O1 := testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 3),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 2),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 1),
status: "Passed",
}
actualData := filterRecentEntries(splitEntryMap{
"env1": {
"test1": {
entryE1T1R1,
entryE1T1R2,
entryE1T1R3,
entryE1T1O1,
entryE1T1O2,
},
"test2": {
entryE1T2R1,
entryE1T2R2,
entryE1T2O1,
},
},
"env2": {
"test2": {
entryE2T2R1,
entryE2T2R2,
entryE2T2O1,
},
},
}, 2)
expectedData := splitEntryMap{
"env1": {
"test1": {
entryE1T1R1,
entryE1T1R2,
entryE1T1R3,
},
"test2": {
entryE1T2R1,
entryE1T2R2,
},
},
"env2": {
"test2": {
entryE2T2R1,
entryE2T2R2,
},
},
}
compareSplitData(t, actualData, expectedData)
}
func compareValues(t *testing.T, actualValues, expectedValues map[string]map[string]float32) {
for environment, actualTests := range actualValues {
expectedTests, environmentOk := expectedValues[environment]
if !environmentOk {
t.Errorf("Unexpected environment %s in actual", environment)
continue
}
for test, actualValue := range actualTests {
expectedValue, testOk := expectedTests[test]
if !testOk {
t.Errorf("Unexpected test %s (in environment %s) in actual", test, environment)
continue
}
if actualValue != expectedValue {
t.Errorf("Wrong value at environment %s and test %s. Expected: %v, Actual: %v", environment, test, expectedValue, actualValue)
}
}
for test := range expectedTests {
_, testOk := actualTests[test]
if !testOk {
t.Errorf("Missing expected test %s (in environment %s) in actual", test, environment)
}
}
}
for environment := range expectedValues {
_, environmentOk := actualValues[environment]
if !environmentOk {
t.Errorf("Missing expected environment %s in actual", environment)
}
}
}
func TestComputeFlakeRates(t *testing.T) {
actualData := computeFlakeRates(splitEntryMap{
"env1": {
"test1": {
{
name: "test1",
environment: "env1",
date: simpleDate(2000, 4),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Failed",
},
},
"test2": {
{
name: "test2",
environment: "env1",
date: simpleDate(2001, 3),
status: "Failed",
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 2),
status: "Failed",
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 1),
status: "Failed",
},
},
},
"env2": {
"test2": {
{
name: "test2",
environment: "env2",
date: simpleDate(2003, 3),
status: "Passed",
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 2),
status: "Failed",
},
},
},
})
expectedData := map[string]map[string]float32{
"env1": {
"test1": 0.2,
"test2": 1,
},
"env2": {
"test2": 0.5,
},
}
compareValues(t, actualData, expectedData)
}
func TestComputeAverageDurations(t *testing.T) {
actualData := computeAverageDurations(splitEntryMap{
"env1": {
"test1": {
{
name: "test1",
environment: "env1",
date: simpleDate(2000, 4),
status: "Passed",
duration: 1,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
duration: 2,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 3),
status: "Passed",
duration: 3,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 2),
status: "Passed",
duration: 3,
}, {
name: "test1",
environment: "env1",
date: simpleDate(2000, 1),
status: "Failed",
duration: 3,
},
},
"test2": {
{
name: "test2",
environment: "env1",
date: simpleDate(2001, 3),
status: "Failed",
duration: 1,
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 2),
status: "Failed",
duration: 3,
}, {
name: "test2",
environment: "env1",
date: simpleDate(2001, 1),
status: "Failed",
duration: 3,
},
},
},
"env2": {
"test2": {
{
name: "test2",
environment: "env2",
date: simpleDate(2003, 3),
status: "Passed",
duration: 0.5,
}, testEntry{
name: "test2",
environment: "env2",
date: simpleDate(2003, 2),
status: "Failed",
duration: 1.5,
},
},
},
})
expectedData := map[string]map[string]float32{
"env1": {
"test1": float32(12) / float32(5),
"test2": float32(7) / float32(3),
},
"env2": {
"test2": 1,
},
}
compareValues(t, actualData, expectedData)
}

View File

@@ -0,0 +1,9 @@
<html>
<head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
</head>
<body>
<div id="chart_div"></div>
</body>
<script src="flake_chart.js"></script>
</html>
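The page is driven entirely by its query string, which flake_chart.js parses into env and test parameters. A representative URL (the bucket path is the one report_flakes.sh links to; the test name is illustrative):

https://storage.googleapis.com/minikube-flake-rate/flake_chart.html?env=Docker_Linux&test=TestFunctional/parallel/LogsCmd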

View File

@@ -0,0 +1,193 @@
// Displays an error message to the UI. Any previous message will be erased.
function displayError(message) {
console.error(message);
}
// Creates a generator that reads the response body one line at a time.
async function* bodyByLinesIterator(response) {
// TODO: Replace this with something that actually reads the body line by line
// (since the file can be big).
const lines = (await response.text()).split("\n");
for (let line of lines) {
// Skip any empty lines (most likely at the end).
if (line !== "") {
yield line;
}
}
}
// Determines whether `str` matches at least one value in `enumObject`.
function isValidEnumValue(enumObject, str) {
for (const enumKey in enumObject) {
if (enumObject[enumKey] === str) {
return true;
}
}
return false;
}
// Enum for test status.
const testStatus = {
PASSED: "Passed",
FAILED: "Failed",
SKIPPED: "Skipped"
}
async function loadTestData() {
const response = await fetch("data.csv");
if (!response.ok) {
const responseText = await response.text();
throw `Failed to fetch data from GCS bucket. Error: ${responseText}`;
}
const lines = bodyByLinesIterator(response);
// Consume the header to ensure the data has the right number of fields.
const header = (await lines.next()).value;
if (header.split(",").length != 6) {
throw `Fetched CSV data contains wrong number of fields. Expected: 6. Actual Header: "${header}"`;
}
const testData = [];
let lineData = ["", "", "", "", "", ""];
for await (const line of lines) {
let splitLine = line.split(",");
if (splitLine.length != 6) {
console.warn(`Found line with wrong number of fields. Actual: ${splitLine.length} Expected: 6. Line: "${line}"`);
continue;
}
splitLine = splitLine.map((value, index) => value === "" ? lineData[index] : value);
lineData = splitLine;
if (!isValidEnumValue(testStatus, splitLine[4])) {
console.warn(`Invalid test status provided. Actual: ${splitLine[4]} Expected: One of ${Object.values(testStatus).join(", ")}`);
continue;
}
testData.push({
commit: splitLine[0],
date: new Date(splitLine[1]),
environment: splitLine[2],
name: splitLine[3],
status: splitLine[4],
duration: Number(splitLine[5]),
});
}
if (testData.length == 0) {
throw "Fetched CSV data is empty or poorly formatted.";
}
return testData;
}
// Computes the average of an array of numbers.
Array.prototype.average = function () {
return this.length === 0 ? 0 : this.reduce((sum, value) => sum + value, 0) / this.length;
};
// Groups array elements by keys obtained through `keyGetter`.
Array.prototype.groupBy = function (keyGetter) {
return Array.from(this.reduce((mapCollection, element) => {
const key = keyGetter(element);
if (mapCollection.has(key)) {
mapCollection.get(key).push(element);
} else {
mapCollection.set(key, [element]);
}
return mapCollection;
}, new Map()).values());
};
// Parses URL search `query` into an object mapping keys to values.
function parseUrlQuery(query) {
if (query[0] === '?') {
query = query.substring(1);
}
return Object.fromEntries((query === "" ? [] : query.split("&")).map(element => {
const keyValue = element.split("=");
return [unescape(keyValue[0]), unescape(keyValue[1])];
}));
}
async function init() {
google.charts.load('current', { 'packages': ['corechart'] });
let testData;
try {
// Wait for Google Charts to load, and for test data to load.
// Only store the test data (at index 1) into `testData`.
testData = (await Promise.all([
new Promise(resolve => google.charts.setOnLoadCallback(resolve)),
loadTestData()
]))[1];
} catch (err) {
displayError(err);
return;
}
const data = new google.visualization.DataTable();
data.addColumn('date', 'Date');
data.addColumn('number', 'Flake Percentage');
data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
data.addColumn('number', 'Duration');
data.addColumn({ type: 'string', role: 'tooltip', 'p': { 'html': true } });
const query = parseUrlQuery(window.location.search);
const desiredTest = query.test || "", desiredEnvironment = query.env || "";
const groups = testData
// Filter to only contain unskipped runs of the requested test and requested environment.
.filter(test => test.name === desiredTest && test.environment === desiredEnvironment && test.status !== testStatus.SKIPPED)
.groupBy(test => test.date.getTime());
data.addRows(
groups
// Sort by run date, past to future.
.sort((a, b) => a[0].date - b[0].date)
// Map each group to all variables needed to format the rows.
.map(tests => ({
date: tests[0].date, // Get one of the dates from the tests (which will all be the same).
flakeRate: tests.map(test => test.status === testStatus.FAILED ? 100 : 0).average(), // Compute average of runs where FAILED counts as 100%.
duration: tests.map(test => test.duration).average(), // Compute average duration of runs.
commitHashes: tests.map(test => ({ // Take all hashes, statuses, and durations of tests in this group.
hash: test.commit,
status: test.status,
duration: test.duration
}))
}))
.map(groupData => [
groupData.date,
groupData.flakeRate,
`<div class="py-2 ps-2">
<b>${groupData.date.toString()}</b><br>
<b>Flake Percentage:</b> ${groupData.flakeRate.toFixed(2)}%<br>
<b>Hashes:</b><br>
${groupData.commitHashes.map(({ hash, status }) => ` - ${hash} (${status})`).join("<br>")}
</div>`,
groupData.duration,
`<div class="py-2 ps-2">
<b>${groupData.date.toString()}</b><br>
<b>Average Duration:</b> ${groupData.duration.toFixed(2)}s<br>
<b>Hashes:</b><br>
${groupData.commitHashes.map(({ hash, duration }) => ` - ${hash} (${duration}s)`).join("<br>")}
</div>`,
])
);
const options = {
title: `Flake rate and duration by day of ${desiredTest} on ${desiredEnvironment}`,
width: window.innerWidth,
height: window.innerHeight,
pointSize: 10,
pointShape: "circle",
series: {
0: { targetAxisIndex: 0 },
1: { targetAxisIndex: 1 },
},
vAxes: {
0: { title: "Flake rate", minValue: 0, maxValue: 100 },
1: { title: "Duration (seconds)" },
},
tooltip: { trigger: "selection", isHtml: true }
};
const chart = new google.visualization.LineChart(document.getElementById('chart_div'));
chart.draw(data, options);
}
init();

View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes a CSV file through stdin, compresses it and writes it to stdout.
# Example usage: < data.csv ./optimize_data.sh > data_optimized.csv
set -eu -o pipefail
# Take input CSV. For each field, if it is the same as the previous row, replace it with an empty string.
# This is to compress the input CSV. Example:
# Input:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# hash,2021-06-10,Docker_Linux_containerd,TestFunctional,Failed,0.6
#
# Output:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# ,,Docker_Linux_containerd,,Failed,0.6
awk -F, 'BEGIN {OFS = FS} { for(i=1; i<=NF; i++) { if($i == j[i]) { $i = ""; } else { j[i] = $i; } } printf "%s\n",$0 }'
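A quick way to see the compression in action (sample rows only):

printf '%s\n' \
'hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5' \
'hash,2021-06-10,Docker_Linux_containerd,TestFunctional,Failed,0.6' \
| ./optimize_data.sh
# Prints:
# hash,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
# ,,Docker_Linux_containerd,,Failed,0.6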

View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes a series of gopogh summary JSONs and formats them into a CSV file with
# a row for each test.
# Example usage: cat gopogh_1.json gopogh_2.json gopogh_3.json | ./process_data.sh
set -eu -o pipefail
# Print header.
printf "Commit Hash,Test Date,Environment,Test,Status,Duration\n"
# Turn each test in each summary file to a CSV line containing its commit hash, date, environment, test, status, and duration.
# Example line:
# 247982745892,2021-06-10,Docker_Linux,TestFunctional,Passed,0.5
jq -r '((.PassedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: .Durations[$name], status: "Passed"}),
(.FailedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: .Durations[$name], status: "Failed"}),
(.SkippedTests[]? as $name | {commit: (.Detail.Details | split(":") | .[0]), date: (.Detail.Details | split(":") | .[1]), environment: .Detail.Name, test: $name, duration: 0, status: "Skipped"}))
| .commit + "," + .date + "," + .environment + "," + .test + "," + .status + "," + (.duration | tostring)'
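To illustrate, a minimal, hypothetical gopogh summary shaped to match the fields the filter reads:

./process_data.sh <<'EOF'
{"Detail":{"Name":"Docker_Linux","Details":"6a9076391d:2021-06-15"},"PassedTests":["TestFunctional"],"Durations":{"TestFunctional":0.5}}
EOF
# Prints:
# Commit Hash,Test Date,Environment,Test,Status,Duration
# 6a9076391d,2021-06-15,Docker_Linux,TestFunctional,Passed,0.5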

View File

@@ -0,0 +1,87 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creates a comment on the provided PR number, using the provided gopogh summary
# to list out the flake rates of all failing tests.
# Example usage: ./report_flakes.sh 11602 gopogh.json Docker_Linux
set -eu -o pipefail
if [ "$#" -ne 3 ]; then
echo "Wrong number of arguments. Usage: report_flakes.sh <PR number> <gopogh_summary.json> <environment>" 1>&2
exit 1
fi
PR_NUMBER=$1
SUMMARY_DATA=$2
ENVIRONMENT=$3
# To prevent having a super-long comment, add a maximum number of tests to report.
MAX_REPORTED_TESTS=30
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TMP_DATA=$(mktemp)
# 1) Process the data in the gopogh summary.
# 2) Filter tests to only include failed tests on the environment (and only get their names).
# 3) Sort the names of the tests.
# 4) Store in file $TMP_DATA.
< "$SUMMARY_DATA" $DIR/process_data.sh \
| sed -n -r -e "s/[0-9a-f]*,[0-9-]*,$ENVIRONMENT,([a-zA-Z\/_-]*),Failed,[.0-9]*/\1/p" \
| sort \
> "$TMP_DATA"
# Download the precomputed flake rates from the GCS bucket into file $TMP_FLAKE_RATES.
TMP_FLAKE_RATES=$(mktemp)
gsutil cp gs://minikube-flake-rate/flake_rates.csv "$TMP_FLAKE_RATES"
TMP_FAILED_RATES="${TMP_FLAKE_RATES}_filtered"
# 1) Parse/filter the flake rates to only include the test name and flake rates for environment.
# 2) Sort the flake rates based on test name.
# 3) Join the flake rates with the failing tests to only get flake rates of failing tests.
# 4) Sort failed test flake rates based on the flakiness of that test - stable tests should be first on the list.
# 5) Store in file $TMP_FAILED_RATES.
< "$TMP_FLAKE_RATES" sed -n -r -e "s/$ENVIRONMENT,([a-zA-Z\/_-]*),([.0-9]*),[.0-9]*/\1,\2/p" \
| sort -t, -k1,1 \
| join -t , -j 1 "$TMP_DATA" - \
| sort -g -t, -k2,2 \
> "$TMP_FAILED_RATES"
FAILED_RATES_LINES=$(wc -l < "$TMP_FAILED_RATES")
if [[ "$FAILED_RATES_LINES" -gt 30 ]]; then
echo "No failed tests! Aborting without commenting..." 1>&2
exit 0
fi
# Create the comment template.
TMP_COMMENT=$(mktemp)
printf "These are the flake rates of all failed tests on %s.\n|Failed Tests|Flake Rate (%%)|\n|---|---|\n" "$ENVIRONMENT" > "$TMP_COMMENT"
# 1) Get the first $MAX_REPORTED_TESTS lines.
# 2) Print a row in the table with the test name, flake rate, and a link to the flake chart for that test.
# 3) Append these rows to file $TMP_COMMENT.
< "$TMP_FAILED_RATES" head -n $MAX_REPORTED_TESTS \
| sed -n -r -e "s/([a-zA-Z\/_-]*),([.0-9]*)/|\1|\2 ([chart](https:\/\/storage.googleapis.com\/minikube-flake-rate\/flake_chart.html?env=$ENVIRONMENT\&test=\1))|/p" \
>> "$TMP_COMMENT"
# If there are too many failing tests, add an extra row explaining this, and a message after the table.
if [[ "$FAILED_RATES_LINES" -gt 30 ]]; then
printf "|More tests...|Continued...|\n\nToo many tests failed - See test logs for more details." >> "$TMP_COMMENT"
fi
# install gh if not present
sudo $DIR/../installers/check_install_gh.sh || true
gh issue comment "https://github.com/kubernetes/minikube/pull/$PR_NUMBER" --body "$(cat $TMP_COMMENT)"
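The posted comment ends up looking roughly like this (the test name and rate are hypothetical):

These are the flake rates of all failed tests on Docker_Linux.
|Failed Tests|Flake Rate (%)|
|---|---|
|TestFunctional/parallel/LogsCmd|2.50 ([chart](https://storage.googleapis.com/minikube-flake-rate/flake_chart.html?env=Docker_Linux&test=TestFunctional/parallel/LogsCmd))|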

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes a gopogh summary, extracts test data as a CSV and appends to the
# existing CSV data in the GCS bucket.
# Example usage: ./jenkins_upload_tests.sh gopogh_summary.json
set -eu -o pipefail
if [ "$#" -ne 1 ]; then
echo "Wrong number of arguments. Usage: jenkins_upload_tests.sh <gopogh_summary.json>" 1>&2
exit 1
fi
TMP_DATA=$(mktemp)
# Use the gopogh summary, process it, optimize the data, remove the header, and store.
<"$1" ./test-flake-chart/process_data.sh \
| ./test-flake-chart/optimize_data.sh \
| sed "1d" > $TMP_DATA
GCS_TMP="gs://minikube-flake-rate/$(basename "$TMP_DATA")"
# Copy data to append to GCS
gsutil cp $TMP_DATA $GCS_TMP
# Append data to existing data.
gsutil compose gs://minikube-flake-rate/data.csv $GCS_TMP gs://minikube-flake-rate/data.csv
# Clear all the temp stuff.
rm $TMP_DATA
gsutil rm $GCS_TMP
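gsutil compose concatenates the listed source objects into the destination object server-side; because the destination here is also the first source, the call behaves as an append. A rough local-filesystem analogue (illustrative only):

# Append the newly processed rows to the existing data file.
cat new_rows.csv >> data.csv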

View File

@@ -47,3 +47,7 @@ gsutil -qm cp "${HTML_OUT}" "gs://${JOB_GCS_BUCKET}.html" || true
SUMMARY_OUT="$ARTIFACTS/summary.txt"
echo ">> uploading ${SUMMARY_OUT}"
gsutil -qm cp "${SUMMARY_OUT}" "gs://${JOB_GCS_BUCKET}_summary.json" || true
if [[ "${MINIKUBE_LOCATION}" == "master" ]]; then
./test-flake-chart/jenkins_upload_tests.sh "${SUMMARY_OUT}"
fi