diff --git a/CHANGELOG.md b/CHANGELOG.md index 2615c899d8..c8ed7653db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## v1.3.11.0 [unreleased] ### Bug Fixes +1. [#2157](https://github.com/influxdata/chronograf/pull/2157): Fix logscale producing console errors when only one point in graph +1. [#2158](https://github.com/influxdata/chronograf/pull/2158): Fix 'Cannot connect to source' false error flag on Dashboard page +1. [#2167](https://github.com/influxdata/chronograf/pull/2167): Add fractions of seconds to time field in csv export +1. [#2087](https://github.com/influxdata/chronograf/pull/2087): Fix Chronograf requiring Telegraf's CPU and system plugins to ensure that all Apps appear on the HOST LIST page. + ### Features ### UI Improvements diff --git a/server/logger.go b/server/logger.go index 81465aafab..3ca5ab24f9 100644 --- a/server/logger.go +++ b/server/logger.go @@ -7,39 +7,57 @@ import ( "github.com/influxdata/chronograf" ) -type logResponseWriter struct { +// statusWriter captures the status header of an http.ResponseWriter +// and is a flusher +type statusWriter struct { http.ResponseWriter - - responseCode int + Flusher http.Flusher + status int } -func (l *logResponseWriter) WriteHeader(status int) { - l.responseCode = status - l.ResponseWriter.WriteHeader(status) +func (w *statusWriter) WriteHeader(status int) { + w.status = status + w.ResponseWriter.WriteHeader(status) +} + +func (w *statusWriter) Status() int { return w.status } + +// Flush is here because the underlying HTTP chunked transfer response writer +// needs to implement http.Flusher. Without it data is silently buffered. This +// was discovered when proxying kapacitor chunked logs. +func (w *statusWriter) Flush() { + if w.Flusher != nil { + w.Flusher.Flush() + } +} // Logger is middleware that logs the request func Logger(logger chronograf.Logger, next http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { now := time.Now() - logger. 
- WithField("component", "server"). + logger.WithField("component", "server"). WithField("remote_addr", r.RemoteAddr). WithField("method", r.Method). WithField("url", r.URL). - Info("Request") + Debug("Request") - lrr := &logResponseWriter{w, 0} - next.ServeHTTP(lrr, r) + sw := &statusWriter{ + ResponseWriter: w, + } + if f, ok := w.(http.Flusher); ok { + sw.Flusher = f + } + next.ServeHTTP(sw, r) later := time.Now() elapsed := later.Sub(now) logger. WithField("component", "server"). WithField("remote_addr", r.RemoteAddr). + WithField("method", r.Method). WithField("response_time", elapsed.String()). - WithField("code", lrr.responseCode). - Info("Response: ", http.StatusText(lrr.responseCode)) + WithField("status", sw.Status()). + Info("Response: ", http.StatusText(sw.Status())) } return http.HandlerFunc(fn) } diff --git a/server/prefixing_redirector.go b/server/prefixing_redirector.go index 86f957efab..2c4652d870 100644 --- a/server/prefixing_redirector.go +++ b/server/prefixing_redirector.go @@ -9,7 +9,8 @@ import ( type interceptingResponseWriter struct { http.ResponseWriter - Prefix string + Flusher http.Flusher + Prefix string } func (i *interceptingResponseWriter) WriteHeader(status int) { @@ -25,11 +26,26 @@ func (i *interceptingResponseWriter) WriteHeader(status int) { i.ResponseWriter.WriteHeader(status) } -// PrefixingRedirector alters the Location header of downstream http.Handlers +// Flush is here because the underlying HTTP chunked transfer response writer +// needs to implement http.Flusher. Without it data is silently buffered. This +// was discovered when proxying kapacitor chunked logs. 
+func (i *interceptingResponseWriter) Flush() { + if i.Flusher != nil { + i.Flusher.Flush() + } +} + +// PrefixedRedirect alters the Location header of downstream http.Handlers // to include a specified prefix func PrefixedRedirect(prefix string, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - iw := &interceptingResponseWriter{w, prefix} + iw := &interceptingResponseWriter{ + ResponseWriter: w, + Prefix: prefix, + } + if flusher, ok := w.(http.Flusher); ok { + iw.Flusher = flusher + } next.ServeHTTP(iw, r) }) } diff --git a/server/proxy.go b/server/proxy.go index b00709e072..3ba5713c59 100644 --- a/server/proxy.go +++ b/server/proxy.go @@ -5,6 +5,8 @@ import ( "net/http" "net/http/httputil" "net/url" + "strings" + "time" ) // KapacitorProxy proxies requests to kapacitor using the path query parameter. @@ -34,28 +36,33 @@ func (h *Service) KapacitorProxy(w http.ResponseWriter, r *http.Request) { return } - u, err := url.Parse(srv.URL) + // To preserve any HTTP query arguments to the kapacitor path, + // we concat and parse them into u. + uri := singleJoiningSlash(srv.URL, path) + u, err := url.Parse(uri) if err != nil { msg := fmt.Sprintf("Error parsing kapacitor url: %v", err) Error(w, http.StatusUnprocessableEntity, msg, h.Logger) return } - u.Path = path - director := func(req *http.Request) { // Set the Host header of the original Kapacitor URL req.Host = u.Host - req.URL = u + // Because we are acting as a proxy, kapacitor needs to have the basic auth information set as // a header directly if srv.Username != "" && srv.Password != "" { req.SetBasicAuth(srv.Username, srv.Password) } } + + // Without a FlushInterval the HTTP Chunked response for kapacitor logs is + // buffered and flushed every 30 seconds. 
proxy := &httputil.ReverseProxy{ - Director: director, + Director: director, + FlushInterval: time.Second, } proxy.ServeHTTP(w, r) } @@ -79,3 +86,15 @@ func (h *Service) KapacitorProxyGet(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorProxyDelete(w http.ResponseWriter, r *http.Request) { h.KapacitorProxy(w, r) } + +func singleJoiningSlash(a, b string) string { + aslash := strings.HasSuffix(a, "/") + bslash := strings.HasPrefix(b, "/") + if aslash && bslash { + return a + b[1:] + } + if !aslash && !bslash { + return a + "/" + b + } + return a + b +} diff --git a/ui/spec/shared/parsing/resultsToCSVSpec.js b/ui/spec/shared/parsing/resultsToCSVSpec.js index 6945e713db..aa8199566a 100644 --- a/ui/spec/shared/parsing/resultsToCSVSpec.js +++ b/ui/spec/shared/parsing/resultsToCSVSpec.js @@ -3,13 +3,16 @@ import { formatDate, dashboardtoCSV, } from 'shared/parsing/resultsToCSV' +import moment from 'moment' describe('formatDate', () => { it('converts timestamp to an excel compatible date string', () => { const timestamp = 1000000000000 const result = formatDate(timestamp) expect(result).to.be.a('string') - expect(+new Date(result)).to.equal(timestamp) + expect(moment(result, 'M/D/YYYY h:mm:ss.SSSSSSSSS A').valueOf()).to.equal( + timestamp + ) }) }) diff --git a/ui/src/dashboards/actions/index.js b/ui/src/dashboards/actions/index.js index b151b5e19e..0108e3ab96 100644 --- a/ui/src/dashboards/actions/index.js +++ b/ui/src/dashboards/actions/index.js @@ -281,7 +281,8 @@ export const updateTempVarValues = (source, dashboard) => async dispatch => { results.forEach(({data}, i) => { const {type, query, id} = tempsWithQueries[i] - const vals = parsers[type](data, query.tagKey || query.measurement)[type] + const parsed = parsers[type](data, query.tagKey || query.measurement) + const vals = parsed[type] dispatch(editTemplateVariableValues(dashboard.id, id, vals)) }) } catch (error) { diff --git a/ui/src/dashboards/components/AxesOptions.js 
b/ui/src/dashboards/components/AxesOptions.js index d44640ea78..fcce69a055 100644 --- a/ui/src/dashboards/components/AxesOptions.js +++ b/ui/src/dashboards/components/AxesOptions.js @@ -6,6 +6,7 @@ import {Tabber, Tab} from 'src/dashboards/components/Tabber' import {DISPLAY_OPTIONS, TOOLTIP_CONTENT} from 'src/dashboards/constants' const {LINEAR, LOG, BASE_2, BASE_10} = DISPLAY_OPTIONS +const getInputMin = scale => (scale === LOG ? '0' : null) const AxesOptions = ({ axes: {y: {bounds, label, prefix, suffix, base, scale, defaultYLabel}}, @@ -38,6 +39,7 @@ const AxesOptions = ({ customValue={min} onSetValue={onSetYAxisBoundMin} type="number" + min={getInputMin(scale)} />