Reintroduce :interval: work
This reverts commit 4b54081cf8.

- :interval: represents a duration now (see the sketch below)
- queryConfig must recognize both :interval: forms
- Handle another :interval: case
- Fix broken links in CHANGELOG
- Rudimentary migration system with an :interval: migration
- Only look for :interval: now
- The server can expect time(:interval:) from now on
- Repair Go specs
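A minimal sketch of the two query shapes this PR moves between (the example query is illustrative, not taken from the diff):

```go
package main

import "fmt"

// Before this change, :interval: expanded to the entire GROUP BY time(...) clause,
// so it could only ever appear as a GROUP BY target.
const before = `SELECT mean("usage_idle") FROM "cpu" WHERE time > :dashboardTime: GROUP BY :interval:`

// After this change, :interval: expands to a bare duration (for example 46702s),
// so it is written inside time(...) and can also be passed to functions such as
// non_negative_derivative(mean("usage_idle"), :interval:).
const after = `SELECT non_negative_derivative(mean("usage_idle"), :interval:) FROM "cpu" WHERE time > :dashboardTime: GROUP BY time(:interval:)`

func main() {
    fmt.Println(before)
    fmt.Println(after)
}
```

Because the template now stands for a duration rather than a whole clause, the same variable can feed both the GROUP BY and function arguments.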
pull/3120/head
parent b0126b66ef
commit 9eb7a8cdd4
@@ -73,6 +73,7 @@
1. [#2829](https://github.com/influxdata/chronograf/pull/2829): Add an optional persistent legend which can toggle series visibility to dashboard cells
1. [#2846](https://github.com/influxdata/chronograf/pull/2846): Allow user to annotate graphs via UI or API
1. [#2829](https://github.com/influxdata/chronograf/pull/2829): Add an optional persistent legend which can toggle series visibility to dashboard cells

### UI Improvements

1. [#2848](https://github.com/influxdata/chronograf/pull/2848): Add ability to set a prefix and suffix on Single Stat and Gauge cell types
@@ -13,6 +13,7 @@ var _ chronograf.BuildStore = &BuildStore{}
// BuildBucket is the bolt bucket used to store Chronograf build information
var BuildBucket = []byte("Build")
var SchemaVersionBucket = []byte("SchemaVersions")

// BuildKey is the constant key used in the bolt bucket
var BuildKey = []byte("build")
@@ -22,6 +23,33 @@ type BuildStore struct {
  client *Client
}

func (s *BuildStore) IsMigrationComplete(version string) (bool, error) {
  complete := false
  if err := s.client.db.View(func(tx *bolt.Tx) error {
    migration := tx.Bucket(SchemaVersionBucket).Get([]byte(version))
    if migration != nil {
      complete = true
    }
    return nil
  }); err != nil {
    // If there's an error, represent this migration as complete
    // so that future downstream code doesn't run it
    return true, err
  }

  return complete, nil
}

func (s *BuildStore) MarkMigrationAsComplete(version string) error {
  if err := s.client.db.Update(func(tx *bolt.Tx) error {
    return tx.Bucket(SchemaVersionBucket).Put([]byte(version), []byte{})
  }); err != nil {
    return err
  }

  return nil
}

// Get retrieves Chronograf build information from the database
func (s *BuildStore) Get(ctx context.Context) (chronograf.BuildInfo, error) {
  var build chronograf.BuildInfo
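The two methods above form a simple schema-version gate: a migration runs only when its timestamp key is missing from the SchemaVersions bucket, and records itself once applied. A minimal sketch of the intended call pattern (the `runOnce` helper and its migration body are illustrative, not part of this commit):

```go
// runOnce applies fn at most once per schema version, using the BuildStore
// gate introduced above. Illustrative helper, not present in the diff.
func runOnce(s *BuildStore, version string, fn func() error) error {
    complete, err := s.IsMigrationComplete(version)
    if err != nil {
        return err
    }
    if complete {
        // Already applied (or recorded as applied) on a previous start-up.
        return nil
    }
    if err := fn(); err != nil {
        return err
    }
    return s.MarkMigrationAsComplete(version)
}
```

migrationChangeIntervalToDuration further down follows exactly this pattern, keyed on the version string "20180228182849".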
@@ -121,6 +121,10 @@ func (c *Client) Open(ctx context.Context, logger chronograf.Logger, build chron
// initialize creates Buckets that are missing
func (c *Client) initialize(ctx context.Context) error {
  if err := c.db.Update(func(tx *bolt.Tx) error {
    // Always create SchemaVersions bucket.
    if _, err := tx.CreateBucketIfNotExists(SchemaVersionBucket); err != nil {
      return err
    }
    // Always create Organizations bucket.
    if _, err := tx.CreateBucketIfNotExists(OrganizationsBucket); err != nil {
      return err
@@ -181,7 +185,7 @@ func (c *Client) migrate(ctx context.Context, build chronograf.BuildInfo) error
  if err := c.LayoutsStore.Migrate(ctx); err != nil {
    return err
  }
  if err := c.DashboardsStore.Migrate(ctx); err != nil {
  if err := c.DashboardsStore.Migrate(ctx, build); err != nil {
    return err
  }
  if err := c.ConfigStore.Migrate(ctx); err != nil {
@@ -3,6 +3,7 @@ package bolt
import (
  "context"
  "strconv"
  "strings"

  "github.com/boltdb/bolt"
  "github.com/influxdata/chronograf"
@@ -48,7 +49,7 @@ func (d *DashboardsStore) AddIDs(ctx context.Context, boards []chronograf.Dashbo
}

// Migrate updates the dashboards at runtime
func (d *DashboardsStore) Migrate(ctx context.Context) error {
func (d *DashboardsStore) Migrate(ctx context.Context, build chronograf.BuildInfo) error {
  // 1. Add UUIDs to cells without one
  boards, err := d.All(ctx)
  if err != nil {
@@ -72,6 +73,45 @@ func (d *DashboardsStore) Migrate(ctx context.Context) error {
    }
  }

  return d.migrationChangeIntervalToDuration(ctx)
}

// migrationChangeIntervalToDuration (1.4.2)
// Before, we supported queries that included `GROUP BY :interval:`
// After, we only support queries with `GROUP BY time(:interval:)`
// thereby allowing non_negative_derivative(_____, :interval)
func (d *DashboardsStore) migrationChangeIntervalToDuration(ctx context.Context) error {
  versionTimestamp := "20180228182849"
  complete, err := d.client.BuildStore.IsMigrationComplete(versionTimestamp)
  if err != nil {
    return err
  }
  if complete {
    return nil
  }

  d.client.logger.Info("Running migrationChangeIntervalToDuration (", versionTimestamp, ")")

  boards, err := d.All(ctx)
  if err != nil {
    return err
  }

  for _, board := range boards {
    for i, cell := range board.Cells {
      for i, query := range cell.Queries {
        query.Command = strings.Replace(query.Command, ":interval:", "time(:interval:)", -1)
        cell.Queries[i] = query
      }

      board.Cells[i] = cell
    }

    d.Update(ctx, board)
  }

  d.client.BuildStore.MarkMigrationAsComplete(versionTimestamp)

  return nil
}
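The migration itself is a plain string rewrite over every stored cell query, mirroring the strings.Replace call in the hunk above. A standalone illustration of its effect (the query text is made up for the example):

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Pre-1.4.2 form: :interval: stood in for the whole GROUP BY time(...) clause.
    old := `SELECT mean("usage_idle") FROM "cpu" WHERE time > :dashboardTime: GROUP BY :interval:`

    // Post-migration form: :interval: is a duration placed inside time(...).
    migrated := strings.Replace(old, ":interval:", "time(:interval:)", -1)
    fmt.Println(migrated)
    // SELECT mean("usage_idle") FROM "cpu" WHERE time > :dashboardTime: GROUP BY time(:interval:)
}
```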
@@ -63,8 +63,9 @@ func ParseTime(influxQL string, now time.Time) (time.Duration, error) {
func Convert(influxQL string) (chronograf.QueryConfig, error) {
  itsDashboardTime := false
  intervalTime := false

  if strings.Contains(influxQL, ":interval:") {
    influxQL = strings.Replace(influxQL, ":interval:", "time(1234s)", 1)
    influxQL = strings.Replace(influxQL, ":interval:", "8675309ns", -1)
    intervalTime = true
  }

@@ -83,7 +84,7 @@ func Convert(influxQL string) (chronograf.QueryConfig, error) {
  }

  if intervalTime {
    influxQL = strings.Replace(influxQL, "time(1234s)", ":interval:", 1)
    influxQL = strings.Replace(influxQL, "8675309ns", ":interval:", -1)
  }

  raw := chronograf.QueryConfig{
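Convert cannot hand :interval: to the InfluxQL parser directly, so it masks the template with a parseable sentinel duration and unmasks it after parsing. A standalone sketch of that round trip (the helper names are illustrative; the sentinel value matches the one in the hunk above):

```go
package main

import (
    "fmt"
    "strings"
)

// maskInterval swaps the :interval: template for a syntactically valid
// duration so the query can be parsed as ordinary InfluxQL.
func maskInterval(q string) string {
    return strings.Replace(q, ":interval:", "8675309ns", -1)
}

// unmaskInterval restores the template after parsing.
func unmaskInterval(q string) string {
    return strings.Replace(q, "8675309ns", ":interval:", -1)
}

func main() {
    q := `SELECT mean("usage_idle") FROM "cpu" GROUP BY time(:interval:)`
    masked := maskInterval(q) // ... GROUP BY time(8675309ns), which a parser will accept
    fmt.Println(unmaskInterval(masked) == q)
    // true: the template survives the round trip
}
```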
@@ -12,6 +12,10 @@ import (
// SortTemplates the templates by size, then type, then value.
func SortTemplates(ts []chronograf.TemplateVar) []chronograf.TemplateVar {
  sort.Slice(ts, func(i, j int) bool {
    if ts[i].Var == ":interval:" {
      return false
    }

    if len(ts[i].Values) != len(ts[j].Values) {
      return len(ts[i].Values) < len(ts[j].Values)
    }
@@ -59,16 +63,8 @@ func RenderTemplate(query string, t chronograf.TemplateVar, now time.Time) (stri
    tv[t.Values[i].Type] = t.Values[i].Value
  }

  if res, ok := tv["resolution"]; ok {
    resolution, err := strconv.ParseInt(res, 0, 64)
    if err != nil {
      return "", err
    }
    ppp, ok := tv["pointsPerPixel"]
    if !ok {
      ppp = "3"
    }
    pixelsPerPoint, err := strconv.ParseInt(ppp, 0, 64)
  if pts, ok := tv["points"]; ok {
    points, err := strconv.ParseInt(pts, 0, 64)
    if err != nil {
      return "", err
    }
@@ -77,28 +73,27 @@ func RenderTemplate(query string, t chronograf.TemplateVar, now time.Time) (stri
    if err != nil {
      return "", err
    }
    interval := AutoGroupBy(resolution, pixelsPerPoint, dur)
    interval := AutoInterval(points, dur)
    return strings.Replace(query, t.Var, interval, -1), nil
  }

  return query, nil
}

// AutoGroupBy generates the time to group by in order to decimate the number of
// points returned in a query
func AutoGroupBy(resolution, pixelsPerPoint int64, duration time.Duration) string {
func AutoInterval(points int64, duration time.Duration) string {
  // The function is: ((total_seconds * millisecond_converstion) / group_by) = pixels / 3
  // Number of points given the pixels
  pixels := float64(resolution) / float64(pixelsPerPoint)
  pixels := float64(points)
  msPerPixel := float64(duration/time.Millisecond) / pixels
  secPerPixel := float64(duration/time.Second) / pixels
  if secPerPixel < 1.0 {
    if msPerPixel < 1.0 {
      msPerPixel = 1.0
    }
    return "time(" + strconv.FormatInt(int64(msPerPixel), 10) + "ms)"
    return strconv.FormatInt(int64(msPerPixel), 10) + "ms"
  }
  // If groupby is more than 1 second round to the second
  return "time(" + strconv.FormatInt(int64(secPerPixel), 10) + "s)"
  return strconv.FormatInt(int64(secPerPixel), 10) + "s"
}

// TemplateReplace replaces templates with values within the query string
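The expected values in the tests below fall straight out of this arithmetic: the interval is the query's time range divided by the requested number of points, truncated toward zero, and expressed in seconds unless it drops below one second per point. The 333-point default corresponds to the old 1000-pixel resolution at 3 pixels per point. A quick check with values taken from the tests in this diff:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    points := float64(333)

    // 4320h range, 333 points: 15,552,000s / 333 ≈ 46,702 — matches time(46702s).
    d := 4320 * time.Hour
    fmt.Println(int64(float64(d/time.Second) / points))

    // 1m range, 333 points: less than 1s per point, so milliseconds are used:
    // 60,000ms / 333 ≈ 180 — matches time(180ms).
    d = time.Minute
    fmt.Println(int64(float64(d/time.Millisecond) / points))
}
```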
@@ -126,59 +126,79 @@ func TestTemplateReplace(t *testing.T) {
      want: `SELECT :field: FROM "cpu"`,
    },
    {
      name: "auto group by",
      query: `SELECT mean(usage_idle) from "cpu" where time > now() - 4320h group by :interval:`,
      name: "auto interval",
      query: `SELECT mean(usage_idle) from "cpu" where time > now() - 4320h group by time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "1000",
              Type: "resolution",
            },
            {
              Value: "3",
              Type: "pointsPerPixel",
              Value: "333",
              Type: "points",
            },
          },
        },
      },
      want: `SELECT mean(usage_idle) from "cpu" where time > now() - 4320h group by time(46655s)`,
      want: `SELECT mean(usage_idle) from "cpu" where time > now() - 4320h group by time(46702s)`,
    },
    {
      name: "auto interval",
      query: `SELECT derivative(mean(usage_idle),:interval:) from "cpu" where time > now() - 4320h group by time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "333",
              Type: "points",
            },
          },
        },
      },
      want: `SELECT derivative(mean(usage_idle),46702s) from "cpu" where time > now() - 4320h group by time(46702s)`,
    },
    {
      name: "auto group by",
      query: `SELECT mean(usage_idle) from "cpu" where time > now() - 4320h group by time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "333",
              Type: "points",
            },
          },
        },
      },
      want: `SELECT mean(usage_idle) from "cpu" where time > now() - 4320h group by time(46702s)`,
    },
    {
      name: "auto group by without duration",
      query: `SELECT mean(usage_idle) from "cpu" WHERE time > now() - 4320h group by :interval:`,
      query: `SELECT mean(usage_idle) from "cpu" WHERE time > now() - 4320h group by time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "1000",
              Type: "resolution",
            },
            {
              Value: "3",
              Type: "pointsPerPixel",
              Value: "333",
              Type: "points",
            },
          },
        },
      },
      want: `SELECT mean(usage_idle) from "cpu" WHERE time > now() - 4320h group by time(46655s)`,
      want: `SELECT mean(usage_idle) from "cpu" WHERE time > now() - 4320h group by time(46702s)`,
    },
    {
      name: "auto group by with :dashboardTime:",
      query: `SELECT mean(usage_idle) from "cpu" WHERE time > :dashboardTime: group by :interval:`,
      query: `SELECT mean(usage_idle) from "cpu" WHERE time > :dashboardTime: group by time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "1000",
              Type: "resolution",
            },
            {
              Value: "3",
              Type: "pointsPerPixel",
              Value: "333",
              Type: "points",
            },
          },
        },
@@ -192,22 +212,18 @@ func TestTemplateReplace(t *testing.T) {
          },
        },
      },
      want: `SELECT mean(usage_idle) from "cpu" WHERE time > now() - 4320h group by time(46655s)`,
      want: `SELECT mean(usage_idle) from "cpu" WHERE time > now() - 4320h group by time(46702s)`,
    },
    {
      name: "auto group by failing condition",
      query: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY :interval:`,
      query: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "115",
              Type: "resolution",
            },
            {
              Value: "3",
              Type: "pointsPerPixel",
              Value: "38",
              Type: "points",
            },
          },
        },
@@ -222,27 +238,23 @@ func TestTemplateReplace(t *testing.T) {
          },
        },
      },
      want: `SELECT mean(usage_idle) FROM "cpu" WHERE time > now() - 1h GROUP BY time(93s)`,
      want: `SELECT mean(usage_idle) FROM "cpu" WHERE time > now() - 1h GROUP BY time(94s)`,
    },
    {
      name: "no template variables specified",
      query: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY :interval:`,
      want: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY :interval:`,
      query: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY time(:interval:)`,
      want: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY time(:interval:)`,
    },
    {
      name: "auto group by failing condition",
      query: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY :interval:`,
      query: `SELECT mean(usage_idle) FROM "cpu" WHERE time > :dashboardTime: GROUP BY time(:interval:)`,
      vars: []chronograf.TemplateVar{
        {
          Var: ":interval:",
          Values: []chronograf.TemplateValue{
            {
              Value: "115",
              Type: "resolution",
            },
            {
              Value: "3",
              Type: "pointsPerPixel",
              Value: "38",
              Type: "points",
            },
          },
        },
@@ -257,7 +269,7 @@ func TestTemplateReplace(t *testing.T) {
          },
        },
      },
      want: `SELECT mean(usage_idle) FROM "cpu" WHERE time > now() - 1h GROUP BY time(93s)`,
      want: `SELECT mean(usage_idle) FROM "cpu" WHERE time > now() - 1h GROUP BY time(94s)`,
    },
    {
      name: "query with no template variables contained should return query",
@@ -268,11 +280,7 @@ func TestTemplateReplace(t *testing.T) {
          Values: []chronograf.TemplateValue{
            {
              Value: "115",
              Type: "resolution",
            },
            {
              Value: "3",
              Type: "pointsPerPixel",
              Type: "points",
            },
          },
        },
@@ -313,12 +321,8 @@ func Test_TemplateVarsUnmarshalling(t *testing.T) {
  "tempVar": ":interval:",
  "values": [
    {
      "value": "1000",
      "type": "resolution"
    },
    {
      "value": "3",
      "type": "pointsPerPixel"
      "value": "333",
      "type": "points"
    },
    {
      "value": "10",
@@ -343,12 +347,8 @@ func Test_TemplateVarsUnmarshalling(t *testing.T) {
  Var: ":interval:",
  Values: []chronograf.TemplateValue{
    {
      Value: "1000",
      Type: "resolution",
    },
    {
      Value: "3",
      Type: "pointsPerPixel",
      Value: "333",
      Type: "points",
    },
    {
      Value: "10",
@@ -379,46 +379,6 @@ func Test_TemplateVarsUnmarshalling(t *testing.T) {
  }
}

func TestAutoGroupBy(t *testing.T) {
  tests := []struct {
    name string
    resolution int64
    pixelsPerPoint int64
    duration time.Duration
    want string
  }{
    {
      name: "String() calculates the GROUP BY interval",
      resolution: 700,
      pixelsPerPoint: 3,
      duration: 24 * time.Hour,
      want: "time(370s)",
    },
    {
      name: "String() milliseconds if less than one second intervals",
      resolution: 100000,
      pixelsPerPoint: 3,
      duration: time.Hour,
      want: "time(107ms)",
    },
    {
      name: "String() milliseconds if less than one millisecond",
      resolution: 100000,
      pixelsPerPoint: 3,
      duration: time.Second,
      want: "time(1ms)",
    },
  }
  for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
      got := AutoGroupBy(tt.resolution, tt.pixelsPerPoint, tt.duration)
      if got != tt.want {
        t.Errorf("TestAutoGroupBy %s =\n%s\nwant\n%s", tt.name, got, tt.want)
      }
    })
  }
}

func Test_RenderTemplate(t *testing.T) {
  gbvTests := []struct {
    name string
@@ -428,38 +388,38 @@ func Test_RenderTemplate(t *testing.T) {
  }{
    {
      name: "relative time only lower bound with one day of duration",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d GROUP BY :interval:",
      resolution: 1000,
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d GROUP BY time(:interval:)",
      resolution: 333,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d GROUP BY time(259s)",
    },
    {
      name: "relative time offset by week",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d - 7d AND time < now() - 7d GROUP BY :interval:",
      resolution: 1000,
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d - 7d AND time < now() - 7d GROUP BY time(:interval:)",
      resolution: 333,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d - 7d AND time < now() - 7d GROUP BY time(259s)",
    },
    {
      name: "relative time with relative upper bound with one minute of duration",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 3m AND time < now() - 2m GROUP BY :interval:",
      resolution: 1000,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 3m AND time < now() - 2m GROUP BY time(179ms)",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 3m AND time < now() - 2m GROUP BY time(:interval:)",
      resolution: 333,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 3m AND time < now() - 2m GROUP BY time(180ms)",
    },
    {
      name: "relative time with relative lower bound and now upper with one day of duration",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d AND time < now() GROUP BY :interval:",
      resolution: 1000,
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d AND time < now() GROUP BY time(:interval:)",
      resolution: 333,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1d AND time < now() GROUP BY time(259s)",
    },
    {
      name: "absolute time with one minute of duration",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > '1985-10-25T00:01:00Z' and time < '1985-10-25T00:02:00Z' GROUP BY :interval:",
      resolution: 1000,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > '1985-10-25T00:01:00Z' and time < '1985-10-25T00:02:00Z' GROUP BY time(179ms)",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > '1985-10-25T00:01:00Z' and time < '1985-10-25T00:02:00Z' GROUP BY time(:interval:)",
      resolution: 333,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > '1985-10-25T00:01:00Z' and time < '1985-10-25T00:02:00Z' GROUP BY time(180ms)",
    },
    {
      name: "absolute time with nano seconds and zero duration",
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > '2017-07-24T15:33:42.994Z' and time < '2017-07-24T15:33:42.994Z' GROUP BY :interval:",
      resolution: 1000,
      query: "SELECT mean(usage_idle) FROM cpu WHERE time > '2017-07-24T15:33:42.994Z' and time < '2017-07-24T15:33:42.994Z' GROUP BY time(:interval:)",
      resolution: 333,
      want: "SELECT mean(usage_idle) FROM cpu WHERE time > '2017-07-24T15:33:42.994Z' and time < '2017-07-24T15:33:42.994Z' GROUP BY time(1ms)",
    },
    {
@@ -480,7 +440,7 @@ func Test_RenderTemplate(t *testing.T) {
      Values: []chronograf.TemplateValue{
        {
          Value: fmt.Sprintf("%d", tt.resolution),
          Type: "resolution",
          Type: "points",
        },
      },
    }
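TestAutoGroupBy is removed outright rather than ported. For reference, a sketch of an equivalent table test against the new AutoInterval signature — not part of this commit, with expectations derived from the duration/points arithmetic above:

```go
func TestAutoInterval(t *testing.T) {
    tests := []struct {
        name     string
        points   int64
        duration time.Duration
        want     string
    }{
        // 24h / 333 points ≈ 259s per point.
        {name: "seconds when at least 1s per point", points: 333, duration: 24 * time.Hour, want: "259s"},
        // 1m / 333 points ≈ 180ms per point.
        {name: "milliseconds below 1s per point", points: 333, duration: time.Minute, want: "180ms"},
        // Zero duration is clamped to the 1ms floor.
        {name: "clamped to 1ms", points: 333, duration: 0, want: "1ms"},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := AutoInterval(tt.points, tt.duration); got != tt.want {
                t.Errorf("AutoInterval(%d, %s) = %s, want %s", tt.points, tt.duration, got, tt.want)
            }
        })
    }
}
```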
@@ -463,7 +463,7 @@ func TestServer(t *testing.T) {
  "name": "Untitled Cell",
  "queries": [
    {
      "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time > :dashboardTime: GROUP BY :interval: FILL(null)",
      "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time > :dashboardTime: GROUP BY time(:interval:) FILL(null)",
      "queryConfig": {
        "database": "telegraf",
        "measurement": "cpg",
@@ -718,7 +718,7 @@ func TestServer(t *testing.T) {
  "name": "Untitled Cell",
  "queries": [
    {
      "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time > :dashboardTime: GROUP BY :interval: FILL(null)",
      "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time > :dashboardTime: GROUP BY time(:interval:) FILL(null)",
      "queryConfig": {
        "database": "telegraf",
        "measurement": "cpg",
@@ -10,7 +10,7 @@
"name": "Untitled Cell",
"queries": [
  {
    "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time \u003e :dashboardTime: GROUP BY :interval: FILL(null)",
    "query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time \u003e :dashboardTime: GROUP BY time(:interval:) FILL(null)",
    "queryConfig": {
      "id": "b20baa61-bacb-4a17-b27d-b904a0d18114",
      "database": "telegraf",
@@ -98,7 +98,7 @@ func TestService_Queries(t *testing.T) {
  r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`{
    "queries": [
      {
        "query": "SELECT \"pingReq\" FROM :dbs:.\"monitor\".\"httpd\" WHERE time > :dashboardTime: AND time < :upperDashboardTime: GROUP BY :interval:",
        "query": "SELECT \"pingReq\" FROM :dbs:.\"monitor\".\"httpd\" WHERE time > :dashboardTime: AND time < :upperDashboardTime: GROUP BY time(:interval:)",
        "id": "82b60d37-251e-4afe-ac93-ca20a3642b11"
      }
    ],
@@ -155,18 +155,14 @@ func TestService_Queries(t *testing.T) {
      "tempVar": ":interval:",
      "values": [
        {
          "value": "1000",
          "type": "resolution"
        },
        {
          "value": "3",
          "type": "pointsPerPixel"
          "value": "333",
          "type": "points"
        }
      ]
    }
  ]
}`))),
want: `{"queries":[{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","query":"SELECT \"pingReq\" FROM :dbs:.\"monitor\".\"httpd\" WHERE time \u003e :dashboardTime: AND time \u003c :upperDashboardTime: GROUP BY :interval:","queryConfig":{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT \"pingReq\" FROM :dbs:.\"monitor\".\"httpd\" WHERE time \u003e :dashboardTime: AND time \u003c :upperDashboardTime: GROUP BY :interval:","range":null,"shifts":[]},"queryTemplated":"SELECT \"pingReq\" FROM \"_internal\".\"monitor\".\"httpd\" WHERE time \u003e now() - 15m AND time \u003c now() GROUP BY time(2s)","tempVars":[{"tempVar":":upperDashboardTime:","values":[{"value":"now()","type":"constant","selected":true}]},{"tempVar":":dashboardTime:","values":[{"value":"now() - 15m","type":"constant","selected":true}]},{"tempVar":":dbs:","values":[{"value":"_internal","type":"database","selected":true}]},{"tempVar":":interval:","values":[{"value":"1000","type":"resolution","selected":false},{"value":"3","type":"pointsPerPixel","selected":false}]}]}]}
want: `{"queries":[{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","query":"SELECT \"pingReq\" FROM :dbs:.\"monitor\".\"httpd\" WHERE time \u003e :dashboardTime: AND time \u003c :upperDashboardTime: GROUP BY time(:interval:)","queryConfig":{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT \"pingReq\" FROM :dbs:.\"monitor\".\"httpd\" WHERE time \u003e :dashboardTime: AND time \u003c :upperDashboardTime: GROUP BY time(:interval:)","range":null,"shifts":[]},"queryTemplated":"SELECT \"pingReq\" FROM \"_internal\".\"monitor\".\"httpd\" WHERE time \u003e now() - 15m AND time \u003c now() GROUP BY time(2s)","tempVars":[{"tempVar":":upperDashboardTime:","values":[{"value":"now()","type":"constant","selected":true}]},{"tempVar":":dashboardTime:","values":[{"value":"now() - 15m","type":"constant","selected":true}]},{"tempVar":":dbs:","values":[{"value":"_internal","type":"database","selected":true}]},{"tempVar":":interval:","values":[{"value":"333","type":"points","selected":false}]}]}]}
`,
},
}
@@ -35,7 +35,7 @@ import {
  templateControlBarVisibilityToggled as templateControlBarVisibilityToggledAction,
} from 'shared/actions/app'
import {presentationButtonDispatcher} from 'shared/dispatchers'
import {DASHBOARD_LAYOUT_ROW_HEIGHT} from 'shared/constants'
import {interval, DASHBOARD_LAYOUT_ROW_HEIGHT} from 'shared/constants'
import {notifyDashboardNotFound} from 'shared/copy/notifications'
import {colorsStringSchema, colorsNumberSchema} from 'shared/schemas'
@@ -334,25 +334,6 @@ class DashboardPage extends Component {
      ],
    }

    const interval = {
      id: 'interval',
      type: 'autoGroupBy',
      tempVar: ':interval:',
      label: 'automatically determine the best group by time',
      values: [
        {
          value: '1000', // pixels
          type: 'resolution',
          selected: true,
        },
        {
          value: '3',
          type: 'pointsPerPixel',
          selected: true,
        },
      ],
    }

    let templatesIncludingDashTime
    if (dashboard) {
      templatesIncludingDashTime = [
@@ -8,7 +8,7 @@ import {Table, Column, Cell} from 'fixed-data-table'
import Dropdown from 'shared/components/Dropdown'
import CustomCell from 'src/data_explorer/components/CustomCell'
import TabItem from 'src/data_explorer/components/TableTabItem'
import {TEMPLATES} from 'src/data_explorer/constants'
import {TEMPLATES} from 'src/shared/constants'

import {fetchTimeSeriesAsync} from 'shared/actions/timeSeries'
@@ -6,7 +6,7 @@ import _ from 'lodash'
import {fetchTimeSeriesAsync} from 'shared/actions/timeSeries'
import {resultsToCSV} from 'src/shared/parsing/resultsToCSV.js'
import download from 'src/external/download.js'
import {TEMPLATES} from 'src/data_explorer/constants'
import {TEMPLATES} from 'src/shared/constants'

const getCSV = (query, errorThrown) => async () => {
  try {
@@ -81,16 +81,3 @@ export const QUERY_TEMPLATES = [
  {text: 'Show Stats', query: 'SHOW STATS'},
  {text: 'Show Diagnostics', query: 'SHOW DIAGNOSTICS'},
]

const interval = {
  id: 'interval',
  type: 'autoGroupBy',
  tempVar: ':interval:',
  label: 'automatically determine the best group by time',
  values: [
    {value: '1000', type: 'resolution', selected: true},
    {value: '3', type: 'pointsPerPixel', selected: true},
  ],
} // pixels

export const TEMPLATES = [interval]
@@ -7,16 +7,16 @@ import queryString from 'query-string'

import _ from 'lodash'

import QueryMaker from '../components/QueryMaker'
import Visualization from '../components/Visualization'
import QueryMaker from 'src/data_explorer/components/QueryMaker'
import Visualization from 'src/data_explorer/components/Visualization'
import WriteDataForm from 'src/data_explorer/components/WriteDataForm'
import Header from '../containers/Header'
import ResizeContainer from 'shared/components/ResizeContainer'
import OverlayTechnologies from 'shared/components/OverlayTechnologies'
import Header from 'src/data_explorer/containers/Header'
import ResizeContainer from 'src/shared/components/ResizeContainer'
import OverlayTechnologies from 'src/shared/components/OverlayTechnologies'
import ManualRefresh from 'src/shared/components/ManualRefresh'

import {VIS_VIEWS, AUTO_GROUP_BY} from 'shared/constants'
import {MINIMUM_HEIGHTS, INITIAL_HEIGHTS, TEMPLATES} from '../constants'
import {VIS_VIEWS, AUTO_GROUP_BY, TEMPLATES} from 'src/shared/constants'
import {MINIMUM_HEIGHTS, INITIAL_HEIGHTS} from 'src/data_explorer/constants'
import {errorThrown} from 'shared/actions/errors'
import {setAutoRefresh} from 'shared/actions/app'
import * as dataExplorerActionCreators from 'src/data_explorer/actions/view'
@@ -4,6 +4,7 @@ import _ from 'lodash'

import {fetchTimeSeriesAsync} from 'shared/actions/timeSeries'
import {removeUnselectedTemplateValues} from 'src/dashboards/constants'
import {intervalValuesPoints} from 'src/shared/constants'

const AutoRefresh = ComposedComponent => {
  class wrapper extends Component {
@@ -97,31 +98,38 @@
      const timeSeriesPromises = queries.map(query => {
        const {host, database, rp} = query

        const templatesWithResolution = templates.map(temp => {
        const templatesWithIntervalVals = templates.map(temp => {
          if (temp.tempVar === ':interval:') {
            if (resolution) {
              // resize event
              return {
                ...temp,
                values: temp.values.map(
                  v => (temp.type === 'resolution' ? {...v, resolution} : v)
                ),
                values: temp.values.map(v => {
                  if (v.type === 'resolution') {
                    return {...v, value: `${resolution}`}
                  }
                  if (v.type === 'points') {
                    return {
                      ...v,
                      value: `${_.toInteger(Number(resolution) / 3)}`,
                    }
                  }
                  return v
                }),
              }
            }

            return {
              ...temp,
              values: [
                ...temp.values,
                {value: '1000', type: 'resolution', selected: true},
              ],
              values: intervalValuesPoints,
            }
          }

          return temp
        })

        const tempVars = removeUnselectedTemplateValues(templatesWithResolution)

        const tempVars = removeUnselectedTemplateValues(
          templatesWithIntervalVals
        )
        return fetchTimeSeriesAsync(
          {
            source: host,
@@ -427,6 +427,20 @@ export const DEFAULT_SOURCE = {
  metaUrl: '',
}

export const intervalValuesPoints = [
  {value: '333', type: 'points', selected: true},
]

export const interval = {
  id: 'interval',
  type: 'autoGroupBy',
  tempVar: ':interval:',
  label: 'automatically determine the best group by time',
  values: intervalValuesPoints,
}

export const TEMPLATES = [interval]

export const IS_STATIC_LEGEND = legend =>
  _.get(legend, 'type', false) === 'static'
@@ -157,9 +157,9 @@ function _buildGroupByTime(groupBy) {
    return ''
  }

  return ` GROUP BY ${
    groupBy.time === AUTO_GROUP_BY ? TEMP_VAR_INTERVAL : `time(${groupBy.time})`
  }`
  return ` GROUP BY time(${
    groupBy.time === AUTO_GROUP_BY ? TEMP_VAR_INTERVAL : `${groupBy.time}`
  })`
}

function _buildGroupByTags(groupBy) {