chore: modify/delete some chronograf refs (#21802)
* chore: modify/delete some chronograf refs * chore: update CHANGELOG (branch: pull/21824/head)
parent
06b403ca57
commit
0cb0da2060
|
@ -30,6 +30,7 @@ This release adds an embedded SQLite database for storing metadata required by t
|
|||
1. [21761](https://github.com/influxdata/influxdb/pull/21761): Ported the `influxd inspect dump-tsm` command from 1.x.
|
||||
1. [21784](https://github.com/influxdata/influxdb/pull/21784): Ported the `influxd inspect dumptsi` command from 1.x.
|
||||
1. [21786](https://github.com/influxdata/influxdb/pull/21786): Ported the `influxd inspect deletetsm` command from 1.x.
|
||||
1. [21802](https://github.com/influxdata/influxdb/pull/21802): Removed unused `chronograf-migator` package & chronograf API service, and updated various "chronograf" references.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
# Chronograf Migrator
|
||||
|
||||
This tool is used to migrate `1.x` Chronograf `Dashboards` and `Template Variables` to their `2.x`
|
||||
equivalents using `pkger` packages. The tool expects the user to have the 1.x Chronograf database.
|
||||
|
||||
```sh
|
||||
chronograf-migrator -h
|
||||
Usage of chronograf-migrator:
|
||||
-db string
|
||||
path to the chronograf database
|
||||
-output string
|
||||
path to the output yaml file (default "dashboards.yml")
|
||||
```
|
||||
|
||||
## Example Usage
|
||||
|
||||
```sh
|
||||
$ chronograf-migrator -db chronograf-v1.db -output dashboards.yml
|
||||
$ INFLUX_TOKEN=<token> influx pkg -o <org-name> -f dashboards.yml
|
||||
```
|
|
@ -1,394 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/kit/platform"
|
||||
|
||||
"github.com/influxdata/flux/ast"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/chronograf"
|
||||
"github.com/influxdata/influxdb/v2/query/influxql"
|
||||
)
|
||||
|
||||
func convert1To2Cell(cell chronograf.DashboardCell) *influxdb.Cell {
|
||||
c := &influxdb.Cell{
|
||||
ID: 1,
|
||||
CellProperty: influxdb.CellProperty{
|
||||
X: cell.X,
|
||||
Y: cell.Y,
|
||||
W: cell.W,
|
||||
H: cell.H,
|
||||
},
|
||||
}
|
||||
|
||||
v := influxdb.View{
|
||||
ViewContents: influxdb.ViewContents{
|
||||
Name: cell.Name,
|
||||
},
|
||||
}
|
||||
|
||||
switch cell.Type {
|
||||
case "line":
|
||||
v.Properties = influxdb.XYViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
Axes: convertAxes(cell.Axes),
|
||||
Type: "xy",
|
||||
Geom: "line",
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
Position: "overlaid",
|
||||
}
|
||||
case "line-stacked":
|
||||
v.Properties = influxdb.XYViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
Axes: convertAxes(cell.Axes),
|
||||
Type: "xy",
|
||||
Geom: "line", // TODO(desa): maybe this needs to be stacked?
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
Position: "stacked",
|
||||
}
|
||||
case "line-stepplot":
|
||||
v.Properties = influxdb.XYViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
Axes: convertAxes(cell.Axes),
|
||||
Type: "xy",
|
||||
Geom: "step",
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
Position: "overlaid",
|
||||
}
|
||||
case "bar":
|
||||
v.Properties = influxdb.XYViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
Axes: convertAxes(cell.Axes),
|
||||
Type: "xy",
|
||||
Geom: "bar",
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
Position: "overlaid",
|
||||
}
|
||||
case "line-plus-single-stat":
|
||||
v.Properties = influxdb.LinePlusSingleStatProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
Axes: convertAxes(cell.Axes),
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
Position: "overlaid",
|
||||
}
|
||||
case "single-stat":
|
||||
v.Properties = influxdb.SingleStatViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
// TODO(desa): what to do about ShowNoteWhenEmpty?
|
||||
}
|
||||
case "gauge":
|
||||
v.Properties = influxdb.GaugeViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
Note: cell.Note,
|
||||
// TODO(desa): what to do about ShowNoteWhenEmpty?
|
||||
}
|
||||
case "table":
|
||||
v.Properties = influxdb.TableViewProperties{
|
||||
Queries: convertQueries(cell.Queries),
|
||||
ViewColors: convertColors(cell.CellColors),
|
||||
//TableOptions
|
||||
//FieldOptions
|
||||
Note: cell.Note,
|
||||
// TODO(desa): what to do about ShowNoteWhenEmpty?
|
||||
}
|
||||
case "note":
|
||||
v.Properties = influxdb.MarkdownViewProperties{
|
||||
Note: cell.Note,
|
||||
}
|
||||
case "alerts", "news", "guide":
|
||||
// TODO(desa): these do not have 2.x equivalents
|
||||
v.Properties = influxdb.EmptyViewProperties{}
|
||||
default:
|
||||
v.Properties = influxdb.EmptyViewProperties{}
|
||||
}
|
||||
|
||||
c.View = &v
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func convert1To2Variable(t chronograf.Template) (influxdb.Variable, error) {
|
||||
v := influxdb.Variable{
|
||||
Description: t.Label,
|
||||
Name: t.Var[1 : len(t.Var)-1], // trims `:` from variables prefix and suffix
|
||||
}
|
||||
|
||||
switch t.Type {
|
||||
case "influxql", "databases", "fieldKeys", "tagKeys", "tagValues", "measurements":
|
||||
if t.Query == nil {
|
||||
return v, fmt.Errorf("expected template variable to have non-nil query")
|
||||
}
|
||||
}
|
||||
|
||||
switch t.Type {
|
||||
case "influxql":
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "query",
|
||||
Values: influxdb.VariableQueryValues{
|
||||
Query: fmt.Sprintf("// %s", t.Query.Command),
|
||||
Language: "flux",
|
||||
},
|
||||
}
|
||||
case "databases":
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "query",
|
||||
Values: influxdb.VariableQueryValues{
|
||||
Query: fmt.Sprintf("// SHOW DATABASES %s", t.Query.DB),
|
||||
Language: "flux",
|
||||
},
|
||||
}
|
||||
case "fieldKeys":
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "query",
|
||||
Values: influxdb.VariableQueryValues{
|
||||
Query: fmt.Sprintf("// SHOW FIELD KEYS FOR %s", t.Query.Measurement),
|
||||
Language: "flux",
|
||||
},
|
||||
}
|
||||
case "tagKeys":
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "query",
|
||||
Values: influxdb.VariableQueryValues{
|
||||
Query: fmt.Sprintf("// SHOW TAG KEYS FOR %s", t.Query.Measurement),
|
||||
Language: "flux",
|
||||
},
|
||||
}
|
||||
case "tagValues":
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "query",
|
||||
Values: influxdb.VariableQueryValues{
|
||||
Query: fmt.Sprintf("// SHOW TAG VALUES FOR %s", t.Query.TagKey),
|
||||
Language: "flux",
|
||||
},
|
||||
}
|
||||
case "measurements":
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "query",
|
||||
Values: influxdb.VariableQueryValues{
|
||||
Query: fmt.Sprintf("// SHOW MEASUREMENTS ON %s", t.Query.DB),
|
||||
Language: "flux",
|
||||
},
|
||||
}
|
||||
case "csv", "constant", "text":
|
||||
values := influxdb.VariableConstantValues{}
|
||||
for _, val := range t.Values {
|
||||
values = append(values, val.Value)
|
||||
}
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "constant",
|
||||
Values: values,
|
||||
}
|
||||
case "map":
|
||||
values := influxdb.VariableMapValues{}
|
||||
for _, val := range t.Values {
|
||||
values[val.Key] = val.Value
|
||||
}
|
||||
v.Arguments = &influxdb.VariableArguments{
|
||||
Type: "map",
|
||||
Values: values,
|
||||
}
|
||||
default:
|
||||
return v, fmt.Errorf("unknown variable type %s", t.Type)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func Convert1To2Dashboard(d1 chronograf.Dashboard) (influxdb.Dashboard, []influxdb.Variable, error) {
|
||||
cells := []*influxdb.Cell{}
|
||||
for _, cell := range d1.Cells {
|
||||
cells = append(cells, convert1To2Cell(cell))
|
||||
}
|
||||
|
||||
d2 := influxdb.Dashboard{
|
||||
Name: d1.Name,
|
||||
Cells: cells,
|
||||
}
|
||||
|
||||
vars := []influxdb.Variable{}
|
||||
for _, template := range d1.Templates {
|
||||
v, err := convert1To2Variable(template)
|
||||
if err != nil {
|
||||
return influxdb.Dashboard{}, nil, err
|
||||
}
|
||||
|
||||
vars = append(vars, v)
|
||||
}
|
||||
|
||||
return d2, vars, nil
|
||||
}
|
||||
|
||||
func convertAxes(a map[string]chronograf.Axis) map[string]influxdb.Axis {
|
||||
m := map[string]influxdb.Axis{}
|
||||
for k, v := range a {
|
||||
m[k] = influxdb.Axis{
|
||||
Bounds: v.Bounds,
|
||||
Label: v.Label,
|
||||
Prefix: v.Prefix,
|
||||
Suffix: v.Suffix,
|
||||
Base: v.Base,
|
||||
Scale: v.Scale,
|
||||
}
|
||||
}
|
||||
|
||||
if _, exists := m["x"]; !exists {
|
||||
m["x"] = influxdb.Axis{}
|
||||
}
|
||||
if _, exists := m["y"]; !exists {
|
||||
m["y"] = influxdb.Axis{}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func convertColors(cs []chronograf.CellColor) []influxdb.ViewColor {
|
||||
vs := []influxdb.ViewColor{}
|
||||
|
||||
hasTextColor := false
|
||||
hasThresholdColor := false
|
||||
for _, c := range cs {
|
||||
if c.Type == "text" {
|
||||
hasTextColor = true
|
||||
}
|
||||
if c.Type == "threshold" {
|
||||
hasThresholdColor = true
|
||||
}
|
||||
|
||||
v := influxdb.ViewColor{
|
||||
ID: c.ID,
|
||||
Type: c.Type,
|
||||
Hex: c.Hex,
|
||||
Name: c.Name,
|
||||
}
|
||||
vs = append(vs, v)
|
||||
}
|
||||
|
||||
if !hasTextColor {
|
||||
vs = append(vs, influxdb.ViewColor{
|
||||
ID: "base",
|
||||
Type: "text",
|
||||
Hex: "#00C9FF",
|
||||
Name: "laser",
|
||||
Value: 0,
|
||||
})
|
||||
}
|
||||
|
||||
if !hasThresholdColor {
|
||||
vs = append(vs, influxdb.ViewColor{
|
||||
ID: "t",
|
||||
Type: "threshold",
|
||||
Hex: "#4591ED",
|
||||
Name: "ocean",
|
||||
Value: 80,
|
||||
})
|
||||
}
|
||||
|
||||
return vs
|
||||
}
|
||||
|
||||
var influxQLVarPattern = regexp.MustCompile(`'?:(\w+):'?`)
|
||||
|
||||
func transpileQuery(q string) (string, error) {
|
||||
now := time.Now()
|
||||
t := influxql.NewTranspilerWithConfig(dbrpMapper{}, influxql.Config{
|
||||
Now: now,
|
||||
FallbackToDBRP: true,
|
||||
})
|
||||
|
||||
query := q
|
||||
query = strings.Replace(query, ":interval:", "8675309ns", -1)
|
||||
query = strings.Replace(query, ":dashboardTime:", "now() - 15m", 1)
|
||||
query = strings.Replace(query, ":upperDashboardTime:", "now()", 1)
|
||||
|
||||
// TODO(desa): replace all variables not using this hack
|
||||
query = influxQLVarPattern.ReplaceAllString(query, "'$1'")
|
||||
|
||||
pkg, err := t.Transpile(context.Background(), query)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return ast.Format(pkg), nil
|
||||
}
|
||||
|
||||
func convertQueries(qs []chronograf.DashboardQuery) []influxdb.DashboardQuery {
|
||||
|
||||
ds := []influxdb.DashboardQuery{}
|
||||
for _, q := range qs {
|
||||
queryText := q.Command
|
||||
if q.Type == "influxql" {
|
||||
// if the query is influxql, add it as a comment and attempt to
|
||||
// compile it to flux
|
||||
queryText = fmt.Sprintf("// %s", queryText)
|
||||
|
||||
tq, err := transpileQuery(q.Command)
|
||||
if err != nil {
|
||||
queryText = fmt.Sprintf("// Failed to transpile query: %v\n%s", err, queryText)
|
||||
} else {
|
||||
queryText = fmt.Sprintf("// Original Query:\n%s\n\n%s", queryText, tq)
|
||||
}
|
||||
}
|
||||
|
||||
d := influxdb.DashboardQuery{
|
||||
Text: queryText,
|
||||
EditMode: "advanced",
|
||||
}
|
||||
|
||||
ds = append(ds, d)
|
||||
}
|
||||
|
||||
if len(ds) == 0 {
|
||||
d := influxdb.DashboardQuery{
|
||||
Text: "// cell had no queries",
|
||||
EditMode: "advanced",
|
||||
BuilderConfig: influxdb.BuilderConfig{
|
||||
// TODO(desa): foo
|
||||
Buckets: []string{"bucket"},
|
||||
},
|
||||
}
|
||||
ds = append(ds, d)
|
||||
}
|
||||
|
||||
return ds
|
||||
}
|
||||
|
||||
type dbrpMapper struct{}
|
||||
|
||||
// FindBy returns the dbrp mapping for the specified ID.
|
||||
func (d dbrpMapper) FindByID(ctx context.Context, orgID platform.ID, id platform.ID) (*influxdb.DBRPMappingV2, error) {
|
||||
return nil, errors.New("mapping not found")
|
||||
}
|
||||
|
||||
// FindMany returns a list of dbrp mappings that match filter and the total count of matching dbrp mappings.
|
||||
func (d dbrpMapper) FindMany(ctx context.Context, dbrp influxdb.DBRPMappingFilterV2, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMappingV2, int, error) {
|
||||
return nil, 0, errors.New("mapping not found")
|
||||
}
|
||||
|
||||
// Create creates a new dbrp mapping, if a different mapping exists an error is returned.
|
||||
func (d dbrpMapper) Create(ctx context.Context, dbrp *influxdb.DBRPMappingV2) error {
|
||||
return errors.New("dbrpMapper does not support creating new mappings")
|
||||
}
|
||||
|
||||
// Update a new dbrp mapping
|
||||
func (d dbrpMapper) Update(ctx context.Context, dbrp *influxdb.DBRPMappingV2) error {
|
||||
return errors.New("dbrpMapper does not support updating mappings")
|
||||
}
|
||||
|
||||
// Delete removes a dbrp mapping.
|
||||
func (d dbrpMapper) Delete(ctx context.Context, orgID platform.ID, id platform.ID) error {
|
||||
return errors.New("dbrpMapper does not support deleting mappings")
|
||||
}
|
|
@ -1,93 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/chronograf"
|
||||
"github.com/influxdata/influxdb/v2/chronograf/bolt"
|
||||
"github.com/influxdata/influxdb/v2/pkger"
|
||||
)
|
||||
|
||||
// Command-line flag values: the path to the 1.x Chronograf bolt
// database to read, and the YAML file to write (see main).
var chronografDBPath string
var outputFile string
|
||||
|
||||
func exec(dbPath, out string) error {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
|
||||
c := bolt.NewClient()
|
||||
c.Path = dbPath
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := c.Open(ctx, nil, chronograf.BuildInfo{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dashboardStore := c.DashboardsStore
|
||||
|
||||
ds, err := dashboardStore.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pkg := &pkger.Template{
|
||||
Objects: make([]pkger.Object, 0),
|
||||
}
|
||||
|
||||
hasVar := map[string]bool{}
|
||||
for _, d1 := range ds {
|
||||
d2, vs, err := Convert1To2Dashboard(d1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pkg.Objects = append(pkg.Objects, pkger.DashboardToObject(d2.Name, d2))
|
||||
|
||||
for _, v := range vs {
|
||||
name := strings.ToLower(v.Name)
|
||||
if hasVar[name] {
|
||||
// TODO(desa): not sure what we actually want to do here
|
||||
logger.Printf("Found duplicate variables with name %q skipping\n", name)
|
||||
continue
|
||||
}
|
||||
hasVar[name] = true
|
||||
|
||||
pkg.Objects = append(pkg.Objects, pkger.VariableToObject(name, v))
|
||||
}
|
||||
}
|
||||
|
||||
f, err := os.Create(out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
b, err := pkg.Encode(pkger.EncodingYAML)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(f, bytes.NewReader(b))
|
||||
return err
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&chronografDBPath, "db", "", "path to the chronograf database")
|
||||
flag.StringVar(&outputFile, "output", "dashboards.yml", "path to the output yaml file")
|
||||
flag.Parse()
|
||||
|
||||
if chronografDBPath == "" {
|
||||
fmt.Fprintln(os.Stdout, "must supply db flag")
|
||||
return
|
||||
}
|
||||
|
||||
if err := exec(chronografDBPath, outputFile); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
}
|
||||
}
|
|
@ -24,7 +24,6 @@ import (
|
|||
"github.com/influxdata/influxdb/v2/backup"
|
||||
"github.com/influxdata/influxdb/v2/bolt"
|
||||
"github.com/influxdata/influxdb/v2/checks"
|
||||
"github.com/influxdata/influxdb/v2/chronograf/server"
|
||||
"github.com/influxdata/influxdb/v2/dashboards"
|
||||
dashboardTransport "github.com/influxdata/influxdb/v2/dashboards/transport"
|
||||
"github.com/influxdata/influxdb/v2/dbrp"
|
||||
|
@ -415,12 +414,6 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
chronografSvc, err := server.NewServiceV2(ctx, m.boltClient.DB())
|
||||
if err != nil {
|
||||
m.log.Error("Failed creating chronograf service", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
metaClient := meta.NewClient(meta.NewConfig(), m.kvStore)
|
||||
if err := metaClient.Open(); err != nil {
|
||||
m.log.Error("Failed to open meta client", zap.Error(err))
|
||||
|
@ -818,7 +811,6 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
|
|||
NotificationEndpointService: notificationEndpointSvc,
|
||||
CheckService: checkSvc,
|
||||
ScraperTargetStoreService: scraperTargetSvc,
|
||||
ChronografService: chronografSvc,
|
||||
SecretService: secretSvc,
|
||||
LookupService: resourceResolver,
|
||||
DocumentService: m.kvService,
|
||||
|
|
|
@ -46,7 +46,7 @@ func TestLauncher_Setup(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// This is to mimic chronograf using cookies as sessions
|
||||
// This is to mimic the UI using cookies as sessions
|
||||
// rather than authorizations
|
||||
func TestLauncher_SetupWithUsers(t *testing.T) {
|
||||
l := launcher.RunAndSetupNewLauncherOrFail(ctx, t)
|
||||
|
|
|
@ -8,8 +8,6 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/chronograf"
|
||||
)
|
||||
|
||||
// Shared transports for all clients to prevent leaking connections.
|
||||
|
@ -49,7 +47,7 @@ func (c *Client) pingTimeout(ctx context.Context) error {
|
|||
case resp := <-resps:
|
||||
return resp
|
||||
case <-ctx.Done():
|
||||
return chronograf.ErrUpstreamTimeout
|
||||
return fmt.Errorf("request to backend timed out")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"github.com/influxdata/httprouter"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/authorizer"
|
||||
"github.com/influxdata/influxdb/v2/chronograf/server"
|
||||
"github.com/influxdata/influxdb/v2/dbrp"
|
||||
"github.com/influxdata/influxdb/v2/http/metric"
|
||||
"github.com/influxdata/influxdb/v2/influxql"
|
||||
|
@ -98,7 +97,6 @@ type APIBackend struct {
|
|||
ScraperTargetStoreService influxdb.ScraperTargetStoreService
|
||||
SecretService influxdb.SecretService
|
||||
LookupService influxdb.LookupService
|
||||
ChronografService *server.Service
|
||||
OrgLookupService authorizer.OrgIDResolver
|
||||
DocumentService influxdb.DocumentService
|
||||
NotificationRuleStore influxdb.NotificationRuleStore
|
||||
|
@ -148,8 +146,6 @@ func NewAPIHandler(b *APIBackend, opts ...APIHandlerOptFn) *APIHandler {
|
|||
b.UserResourceMappingService, b.OrganizationService)
|
||||
h.Mount(prefixChecks, NewCheckHandler(b.Logger, checkBackend))
|
||||
|
||||
h.Mount(prefixChronograf, NewChronografHandler(b.ChronografService, b.HTTPErrorHandler))
|
||||
|
||||
deleteBackend := NewDeleteBackend(b.Logger.With(zap.String("handler", "delete")), b)
|
||||
h.Mount(prefixDelete, NewDeleteHandler(b.Logger, deleteBackend))
|
||||
|
||||
|
|
|
@ -1,110 +0,0 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"github.com/influxdata/httprouter"
|
||||
"github.com/influxdata/influxdb/v2/chronograf/server"
|
||||
"github.com/influxdata/influxdb/v2/kit/platform/errors"
|
||||
)
|
||||
|
||||
// prefixChronograf is the URL prefix under which the legacy Chronograf
// v1 API is mounted.
const prefixChronograf = "/chronograf"

// ChronografHandler is an http handler serving the legacy Chronograf
// v1 API on top of an embedded httprouter.Router.
type ChronografHandler struct {
	*httprouter.Router
	Service *server.Service
}
|
||||
|
||||
// NewChronografHandler is the constructor an chronograf handler.
|
||||
func NewChronografHandler(s *server.Service, he errors.HTTPErrorHandler) *ChronografHandler {
|
||||
h := &ChronografHandler{
|
||||
Router: NewRouter(he),
|
||||
Service: s,
|
||||
}
|
||||
/* API */
|
||||
// Organizations
|
||||
h.HandlerFunc("GET", "/chronograf/v1/organizations", h.Service.Organizations)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/organizations", h.Service.NewOrganization)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/organizations/:oid", h.Service.OrganizationID)
|
||||
h.HandlerFunc("PATCH", "/chronograf/v1/organizations/:oid", h.Service.UpdateOrganization)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/organizations/:oid", h.Service.RemoveOrganization)
|
||||
|
||||
// Mappings
|
||||
h.HandlerFunc("GET", "/chronograf/v1/mappings", h.Service.Mappings)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/mappings", h.Service.NewMapping)
|
||||
|
||||
h.HandlerFunc("PUT", "/chronograf/v1/mappings/:id", h.Service.UpdateMapping)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/mappings/:id", h.Service.RemoveMapping)
|
||||
|
||||
// Layouts
|
||||
h.HandlerFunc("GET", "/chronograf/v1/layouts", h.Service.Layouts)
|
||||
h.HandlerFunc("GET", "/chronograf/v1/layouts/:id", h.Service.LayoutsID)
|
||||
|
||||
// Users associated with Chronograf
|
||||
h.HandlerFunc("GET", "/chronograf/v1/me", h.Service.Me)
|
||||
|
||||
// TODO(desa): what to do here?
|
||||
// Set current chronograf organization the user is logged into
|
||||
//h.HandlerFunc("PUT", "/chronograf/v1/me", h.Service.UpdateMe(opts.Auth))
|
||||
|
||||
// TODO(desa): what to do about admin's being able to set superadmin
|
||||
h.HandlerFunc("GET", "/chronograf/v1/organizations/:oid/users", h.Service.Users)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/organizations/:oid/users", h.Service.NewUser)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/organizations/:oid/users/:id", h.Service.UserID)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/organizations/:oid/users/:id", h.Service.RemoveUser)
|
||||
h.HandlerFunc("PATCH", "/chronograf/v1/organizations/:oid/users/:id", h.Service.UpdateUser)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/users", h.Service.Users)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/users", h.Service.NewUser)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/users/:id", h.Service.UserID)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/users/:id", h.Service.RemoveUser)
|
||||
h.HandlerFunc("PATCH", "/chronograf/v1/users/:id", h.Service.UpdateUser)
|
||||
|
||||
// Dashboards
|
||||
h.HandlerFunc("GET", "/chronograf/v1/dashboards", h.Service.Dashboards)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/dashboards", h.Service.NewDashboard)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/dashboards/:id", h.Service.DashboardID)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/dashboards/:id", h.Service.RemoveDashboard)
|
||||
h.HandlerFunc("PUT", "/chronograf/v1/dashboards/:id", h.Service.ReplaceDashboard)
|
||||
h.HandlerFunc("PATCH", "/chronograf/v1/dashboards/:id", h.Service.UpdateDashboard)
|
||||
// Dashboard Cells
|
||||
h.HandlerFunc("GET", "/chronograf/v1/dashboards/:id/cells", h.Service.DashboardCells)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/dashboards/:id/cells", h.Service.NewDashboardCell)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/dashboards/:id/cells/:cid", h.Service.DashboardCellID)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/dashboards/:id/cells/:cid", h.Service.RemoveDashboardCell)
|
||||
h.HandlerFunc("PUT", "/chronograf/v1/dashboards/:id/cells/:cid", h.Service.ReplaceDashboardCell)
|
||||
// Dashboard Templates
|
||||
h.HandlerFunc("GET", "/chronograf/v1/dashboards/:id/templates", h.Service.Templates)
|
||||
h.HandlerFunc("POST", "/chronograf/v1/dashboards/:id/templates", h.Service.NewTemplate)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/dashboards/:id/templates/:tid", h.Service.TemplateID)
|
||||
h.HandlerFunc("DELETE", "/chronograf/v1/dashboards/:id/templates/:tid", h.Service.RemoveTemplate)
|
||||
h.HandlerFunc("PUT", "/chronograf/v1/dashboards/:id/templates/:tid", h.Service.ReplaceTemplate)
|
||||
|
||||
// Global application config for Chronograf
|
||||
h.HandlerFunc("GET", "/chronograf/v1/config", h.Service.Config)
|
||||
h.HandlerFunc("GET", "/chronograf/v1/config/auth", h.Service.AuthConfig)
|
||||
h.HandlerFunc("PUT", "/chronograf/v1/config/auth", h.Service.ReplaceAuthConfig)
|
||||
|
||||
// Organization config settings for Chronograf
|
||||
h.HandlerFunc("GET", "/chronograf/v1/org_config", h.Service.OrganizationConfig)
|
||||
h.HandlerFunc("GET", "/chronograf/v1/org_config/logviewer", h.Service.OrganizationLogViewerConfig)
|
||||
h.HandlerFunc("PUT", "/chronograf/v1/org_config/logviewer", h.Service.ReplaceOrganizationLogViewerConfig)
|
||||
|
||||
h.HandlerFunc("GET", "/chronograf/v1/env", h.Service.Environment)
|
||||
|
||||
allRoutes := &server.AllRoutes{
|
||||
// TODO(desa): what to do here
|
||||
//logger: opts.logger,
|
||||
//CustomLinks: opts.CustomLinks,
|
||||
StatusFeed: "https://www.influxdata.com/feed/json",
|
||||
}
|
||||
|
||||
h.Handler("GET", "/chronograf/v1/", allRoutes)
|
||||
|
||||
return h
|
||||
}
|
|
@ -65,11 +65,10 @@ func (h *PlatformHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
// Serve the chronograf assets for any basepath that does not start with addressable parts
|
||||
// of the platform API.
|
||||
// Serve the static UI assets for any basepath that does not start with
|
||||
// addressable parts of the platform API.
|
||||
if !strings.HasPrefix(r.URL.Path, "/v1") &&
|
||||
!strings.HasPrefix(r.URL.Path, "/api/v2") &&
|
||||
!strings.HasPrefix(r.URL.Path, "/chronograf/") &&
|
||||
!strings.HasPrefix(r.URL.Path, "/private/") {
|
||||
h.AssetHandler.ServeHTTP(w, r)
|
||||
return
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
const index = `<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Chronograf API</title>
|
||||
<title>InfluxDB 2 API</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<!--
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
package errors
|
||||
|
||||
// ChronografError is a domain error encountered while processing chronograf requests.
type ChronografError string

// Error returns the string value of the error, satisfying the built-in
// error interface.
func (e ChronografError) Error() string {
	return string(e)
}
|
Loading…
Reference in New Issue