diff --git a/chronograf.go b/chronograf.go
index 9d1d355d5..021877f1d 100644
--- a/chronograf.go
+++ b/chronograf.go
@@ -325,6 +325,36 @@ type UsersStore interface {
Update(context.Context, *User) error
}
+// Database represents a database in a time series source
+type Database struct {
+ Name string `json:"name"` // a unique string identifier for the database
+ Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy)
+ Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy)
+ ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy)
+}
+
+// RetentionPolicy represents a retention policy in a time series source
+type RetentionPolicy struct {
+ Name string `json:"name"` // a unique string identifier for the retention policy
+ Duration string `json:"duration,omitempty"` // the duration
+ Replication int32 `json:"replication,omitempty"` // the replication factor
+ ShardDuration string `json:"shardDuration,omitempty"` // the shard duration
+ Default bool `json:"isDefault,omitempty"` // whether the RP should be the default
+}
+
+// Databases represents the databases in a time series source
+type Databases interface {
+	// AllDB lists all databases
+	AllDB(context.Context) ([]Database, error)
+	Connect(context.Context, *Source) error
+	CreateDB(context.Context, *Database) (*Database, error)
+	DropDB(context.Context, string) error
+	AllRP(context.Context, string) ([]RetentionPolicy, error)
+	CreateRP(context.Context, string, *RetentionPolicy) (*RetentionPolicy, error)
+	UpdateRP(context.Context, string, string, *RetentionPolicy) (*RetentionPolicy, error)
+	DropRP(context.Context, string, string) error
+}
+
// DashboardID is the dashboard ID
type DashboardID int
diff --git a/influx/databases.go b/influx/databases.go
new file mode 100644
index 000000000..6cff80129
--- /dev/null
+++ b/influx/databases.go
@@ -0,0 +1,204 @@
+package influx
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/influxdata/chronograf"
+)
+
+// AllDB returns all databases from within Influx
+func (c *Client) AllDB(ctx context.Context) ([]chronograf.Database, error) {
+ databases, err := c.showDatabases(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return databases, nil
+}
+
+// CreateDB creates a database within Influx
+func (c *Client) CreateDB(ctx context.Context, db *chronograf.Database) (*chronograf.Database, error) {
+ _, err := c.Query(ctx, chronograf.Query{
+ Command: fmt.Sprintf(`CREATE DATABASE "%s"`, db.Name),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ res := &chronograf.Database{Name: db.Name}
+
+ return res, nil
+}
+
+// DropDB drops a database within Influx
+func (c *Client) DropDB(ctx context.Context, database string) error {
+ _, err := c.Query(ctx, chronograf.Query{
+ Command: fmt.Sprintf(`DROP DATABASE "%s"`, database),
+ DB: database,
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// AllRP returns all the retention policies for a specific database
+func (c *Client) AllRP(ctx context.Context, database string) ([]chronograf.RetentionPolicy, error) {
+ retentionPolicies, err := c.showRetentionPolicies(ctx, database)
+ if err != nil {
+ return nil, err
+ }
+
+ return retentionPolicies, nil
+}
+
+func (c *Client) getRP(ctx context.Context, db, name string) (chronograf.RetentionPolicy, error) {
+ rps, err := c.AllRP(ctx, db)
+ if err != nil {
+ return chronograf.RetentionPolicy{}, err
+ }
+
+ for _, rp := range rps {
+ if rp.Name == name {
+ return rp, nil
+ }
+ }
+ return chronograf.RetentionPolicy{}, fmt.Errorf("unknown retention policy")
+}
+
+// CreateRP creates a retention policy for a specific database
+func (c *Client) CreateRP(ctx context.Context, database string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
+ query := fmt.Sprintf(`CREATE RETENTION POLICY "%s" ON "%s" DURATION %s REPLICATION %d`, rp.Name, database, rp.Duration, rp.Replication)
+ if len(rp.ShardDuration) != 0 {
+ query = fmt.Sprintf(`%s SHARD DURATION %s`, query, rp.ShardDuration)
+ }
+
+ if rp.Default {
+ query = fmt.Sprintf(`%s DEFAULT`, query)
+ }
+
+ _, err := c.Query(ctx, chronograf.Query{
+ Command: query,
+ DB: database,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := c.getRP(ctx, database, rp.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ return &res, nil
+}
+
+// UpdateRP updates a specific retention policy for a specific database
+func (c *Client) UpdateRP(ctx context.Context, database string, name string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
+	var buffer bytes.Buffer
+	buffer.WriteString(fmt.Sprintf(`ALTER RETENTION POLICY "%s" ON "%s"`, name, database))
+	if len(rp.Duration) > 0 {
+		buffer.WriteString(" DURATION " + rp.Duration)
+	}
+	if rp.Replication > 0 {
+		buffer.WriteString(" REPLICATION " + fmt.Sprint(rp.Replication))
+	}
+	if len(rp.ShardDuration) > 0 {
+		buffer.WriteString(" SHARD DURATION " + rp.ShardDuration)
+	}
+	if rp.Default {
+		buffer.WriteString(" DEFAULT")
+	}
+	queryRes, err := c.Query(ctx, chronograf.Query{
+		Command: buffer.String(),
+		DB:      database,
+		RP:      name,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// The ALTER RETENTION POLICY statement puts the error within the results itself
+	// So, we have to crack open the results to see what happened
+	octets, err := queryRes.MarshalJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	results := make([]struct{ Error string }, 0)
+	if err := json.Unmarshal(octets, &results); err != nil {
+		return nil, err
+	}
+
+	// At last, we can check if there are any error strings
+	for _, r := range results {
+		if r.Error != "" {
+			return nil, fmt.Errorf("%s", r.Error)
+		}
+	}
+
+	// ALTER cannot rename a retention policy, so look it back up by `name`
+	res, err := c.getRP(ctx, database, name)
+	if err != nil {
+		return nil, err
+	}
+
+	return &res, nil
+}
+
+// DropRP removes a specific retention policy for a specific database
+func (c *Client) DropRP(ctx context.Context, database string, rp string) error {
+ _, err := c.Query(ctx, chronograf.Query{
+ Command: fmt.Sprintf(`DROP RETENTION POLICY "%s" ON "%s"`, rp, database),
+ DB: database,
+ RP: rp,
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Client) showDatabases(ctx context.Context) ([]chronograf.Database, error) {
+ res, err := c.Query(ctx, chronograf.Query{
+ Command: `SHOW DATABASES`,
+ })
+ if err != nil {
+ return nil, err
+ }
+ octets, err := res.MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+
+ results := showResults{}
+ if err := json.Unmarshal(octets, &results); err != nil {
+ return nil, err
+ }
+
+ return results.Databases(), nil
+}
+
+func (c *Client) showRetentionPolicies(ctx context.Context, name string) ([]chronograf.RetentionPolicy, error) {
+ retentionPolicies, err := c.Query(ctx, chronograf.Query{
+ Command: fmt.Sprintf(`SHOW RETENTION POLICIES ON "%s"`, name),
+ DB: name,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ octets, err := retentionPolicies.MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+
+ results := showResults{}
+ if err := json.Unmarshal(octets, &results); err != nil {
+ return nil, err
+ }
+
+ return results.RetentionPolicies(), nil
+}
diff --git a/influx/influx.go b/influx/influx.go
index 4f08efbf5..f01234af5 100644
--- a/influx/influx.go
+++ b/influx/influx.go
@@ -15,6 +15,7 @@ import (
var _ chronograf.TimeSeries = &Client{}
var _ chronograf.TSDBStatus = &Client{}
+var _ chronograf.Databases = &Client{}
// Shared transports for all clients to prevent leaking connections
var (
diff --git a/influx/permissions.go b/influx/permissions.go
index 809aff953..ba3206acb 100644
--- a/influx/permissions.go
+++ b/influx/permissions.go
@@ -75,6 +75,55 @@ func (r *showResults) Users() []chronograf.User {
return res
}
+// Databases converts SHOW DATABASES to chronograf Databases
+func (r *showResults) Databases() []chronograf.Database {
+ res := []chronograf.Database{}
+ for _, u := range *r {
+ for _, s := range u.Series {
+ for _, v := range s.Values {
+ if name, ok := v[0].(string); !ok {
+ continue
+ } else {
+ d := chronograf.Database{Name: name}
+ res = append(res, d)
+ }
+ }
+ }
+ }
+ return res
+}
+
+func (r *showResults) RetentionPolicies() []chronograf.RetentionPolicy {
+ res := []chronograf.RetentionPolicy{}
+ for _, u := range *r {
+ for _, s := range u.Series {
+ for _, v := range s.Values {
+ if name, ok := v[0].(string); !ok {
+ continue
+ } else if duration, ok := v[1].(string); !ok {
+ continue
+ } else if sduration, ok := v[2].(string); !ok {
+ continue
+ } else if replication, ok := v[3].(float64); !ok {
+ continue
+ } else if def, ok := v[4].(bool); !ok {
+ continue
+ } else {
+ d := chronograf.RetentionPolicy{
+ Name: name,
+ Duration: duration,
+ ShardDuration: sduration,
+ Replication: int32(replication),
+ Default: def,
+ }
+ res = append(res, d)
+ }
+ }
+ }
+ }
+ return res
+}
+
// Permissions converts SHOW GRANTS to chronograf.Permissions
func (r *showResults) Permissions() chronograf.Permissions {
res := []chronograf.Permission{}
diff --git a/server/dashboards.go b/server/dashboards.go
index 07c1fb2e1..f6025099d 100644
--- a/server/dashboards.go
+++ b/server/dashboards.go
@@ -4,9 +4,7 @@ import (
"encoding/json"
"fmt"
"net/http"
- "strconv"
- "github.com/bouk/httprouter"
"github.com/influxdata/chronograf"
"github.com/influxdata/chronograf/influx"
"github.com/influxdata/chronograf/uuid"
@@ -160,7 +158,7 @@ func (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {
// ReplaceDashboard completely replaces a dashboard
func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- idParam, err := strconv.Atoi(httprouter.GetParamFromContext(ctx, "id"))
+ idParam, err := paramID("id", r)
if err != nil {
msg := fmt.Sprintf("Could not parse dashboard ID: %s", err)
Error(w, http.StatusInternalServerError, msg, s.Logger)
@@ -198,10 +196,11 @@ func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {
// UpdateDashboard completely updates either the dashboard name or the cells
func (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- idParam, err := strconv.Atoi(httprouter.GetParamFromContext(ctx, "id"))
+ idParam, err := paramID("id", r)
if err != nil {
msg := fmt.Sprintf("Could not parse dashboard ID: %s", err)
Error(w, http.StatusInternalServerError, msg, s.Logger)
+ return
}
id := chronograf.DashboardID(idParam)
diff --git a/server/databases.go b/server/databases.go
new file mode 100644
index 000000000..1e0456609
--- /dev/null
+++ b/server/databases.go
@@ -0,0 +1,411 @@
+package server
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/bouk/httprouter"
+ "github.com/influxdata/chronograf"
+)
+
+type dbLinks struct {
+ Self string `json:"self"` // Self link mapping to this resource
+ RPs string `json:"retentionPolicies"` // URL for retention policies for this database
+}
+
+type dbResponse struct {
+ Name string `json:"name"` // a unique string identifier for the database
+ Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy)
+ Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy)
+ ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy)
+ RPs []rpResponse `json:"retentionPolicies,omitempty"` // RPs are the retention policies for a database
+ Links dbLinks `json:"links"` // Links are URI locations related to the database
+}
+
+// newDBResponse creates the response for the /databases endpoint
+func newDBResponse(srcID int, name string, rps []rpResponse) dbResponse {
+ base := "/chronograf/v1/sources"
+ return dbResponse{
+ Name: name,
+ RPs: rps,
+ Links: dbLinks{
+ Self: fmt.Sprintf("%s/%d/dbs/%s", base, srcID, name),
+ RPs: fmt.Sprintf("%s/%d/dbs/%s/rps", base, srcID, name),
+ },
+ }
+}
+
+type dbsResponse struct {
+ Databases []dbResponse `json:"databases"`
+}
+
+type rpLinks struct {
+ Self string `json:"self"` // Self link mapping to this resource
+}
+
+type rpResponse struct {
+ Name string `json:"name"` // a unique string identifier for the retention policy
+ Duration string `json:"duration"` // the duration
+ Replication int32 `json:"replication"` // the replication factor
+ ShardDuration string `json:"shardDuration"` // the shard duration
+ Default bool `json:"isDefault"` // whether the RP should be the default
+ Links rpLinks `json:"links"` // Links are URI locations related to the database
+}
+
+// WithLinks adds links to an rpResponse in place
+func (r *rpResponse) WithLinks(srcID int, dbName string) {
+ base := "/chronograf/v1/sources"
+ r.Links = rpLinks{
+ Self: fmt.Sprintf("%s/%d/dbs/%s/rps/%s", base, srcID, dbName, r.Name),
+ }
+}
+
+type rpsResponse struct {
+ RetentionPolicies []rpResponse `json:"retentionPolicies"`
+}
+
+// GetDatabases queries the list of all databases for a source
+func (h *Service) GetDatabases(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ srcID, err := paramID("id", r)
+ if err != nil {
+ Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+ return
+ }
+
+ src, err := h.SourcesStore.Get(ctx, srcID)
+ if err != nil {
+ notFound(w, srcID, h.Logger)
+ return
+ }
+
+ db := h.Databases
+ if err = db.Connect(ctx, &src); err != nil {
+ msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+ Error(w, http.StatusBadRequest, msg, h.Logger)
+ return
+ }
+
+ databases, err := db.AllDB(ctx)
+ if err != nil {
+ Error(w, http.StatusBadRequest, err.Error(), h.Logger)
+ return
+ }
+
+ dbs := make([]dbResponse, len(databases))
+ for i, d := range databases {
+ rps, err := h.allRPs(ctx, db, srcID, d.Name)
+ if err != nil {
+ Error(w, http.StatusBadRequest, err.Error(), h.Logger)
+ return
+ }
+ dbs[i] = newDBResponse(srcID, d.Name, rps)
+ }
+
+ res := dbsResponse{
+ Databases: dbs,
+ }
+
+ encodeJSON(w, http.StatusOK, res, h.Logger)
+}
+
+// NewDatabase creates a new database within the datastore
+func (h *Service) NewDatabase(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ srcID, err := paramID("id", r)
+ if err != nil {
+ Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+ return
+ }
+
+ src, err := h.SourcesStore.Get(ctx, srcID)
+ if err != nil {
+ notFound(w, srcID, h.Logger)
+ return
+ }
+
+ db := h.Databases
+
+ if err = db.Connect(ctx, &src); err != nil {
+ msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+ Error(w, http.StatusBadRequest, msg, h.Logger)
+ return
+ }
+
+ postedDB := &chronograf.Database{}
+ if err := json.NewDecoder(r.Body).Decode(postedDB); err != nil {
+ invalidJSON(w, h.Logger)
+ return
+ }
+
+ if err := ValidDatabaseRequest(postedDB); err != nil {
+ invalidData(w, err, h.Logger)
+ return
+ }
+
+ database, err := db.CreateDB(ctx, postedDB)
+ if err != nil {
+ Error(w, http.StatusBadRequest, err.Error(), h.Logger)
+ return
+ }
+
+ rps, err := h.allRPs(ctx, db, srcID, database.Name)
+ if err != nil {
+ Error(w, http.StatusBadRequest, err.Error(), h.Logger)
+ return
+ }
+ res := newDBResponse(srcID, database.Name, rps)
+ encodeJSON(w, http.StatusCreated, res, h.Logger)
+}
+
+// DropDatabase removes a database from a data source
+func (h *Service) DropDatabase(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ srcID, err := paramID("id", r)
+ if err != nil {
+ Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+ return
+ }
+
+ src, err := h.SourcesStore.Get(ctx, srcID)
+ if err != nil {
+ notFound(w, srcID, h.Logger)
+ return
+ }
+
+ db := h.Databases
+
+ if err = db.Connect(ctx, &src); err != nil {
+ msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+ Error(w, http.StatusBadRequest, msg, h.Logger)
+ return
+ }
+
+ dbID := httprouter.GetParamFromContext(ctx, "dbid")
+
+ dropErr := db.DropDB(ctx, dbID)
+ if dropErr != nil {
+ Error(w, http.StatusBadRequest, dropErr.Error(), h.Logger)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// RetentionPolicies lists retention policies within a database
+func (h *Service) RetentionPolicies(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	srcID, err := paramID("id", r)
+	if err != nil {
+		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+		return
+	}
+
+	src, err := h.SourcesStore.Get(ctx, srcID)
+	if err != nil {
+		notFound(w, srcID, h.Logger)
+		return
+	}
+
+	db := h.Databases
+	if err = db.Connect(ctx, &src); err != nil {
+		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+		Error(w, http.StatusBadRequest, msg, h.Logger)
+		return
+	}
+
+	dbID := httprouter.GetParamFromContext(ctx, "dbid")
+	rps, err := h.allRPs(ctx, db, srcID, dbID)
+	if err != nil {
+		msg := fmt.Sprintf("Unable to get retention policies for source %d: %v", srcID, err)
+		Error(w, http.StatusBadRequest, msg, h.Logger)
+		return
+	}
+	encodeJSON(w, http.StatusOK, rpsResponse{RetentionPolicies: rps}, h.Logger)
+}
+
+func (h *Service) allRPs(ctx context.Context, db chronograf.Databases, srcID int, dbID string) ([]rpResponse, error) {
+ allRP, err := db.AllRP(ctx, dbID)
+ if err != nil {
+ return nil, err
+ }
+
+ rps := make([]rpResponse, len(allRP))
+ for i, rp := range allRP {
+ rp := rpResponse{
+ Name: rp.Name,
+ Duration: rp.Duration,
+ Replication: rp.Replication,
+ ShardDuration: rp.ShardDuration,
+ Default: rp.Default,
+ }
+ rp.WithLinks(srcID, dbID)
+ rps[i] = rp
+ }
+ return rps, nil
+}
+
+// NewRetentionPolicy creates a new retention policy for a database
+func (h *Service) NewRetentionPolicy(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ srcID, err := paramID("id", r)
+ if err != nil {
+ Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+ return
+ }
+
+ src, err := h.SourcesStore.Get(ctx, srcID)
+ if err != nil {
+ notFound(w, srcID, h.Logger)
+ return
+ }
+
+ db := h.Databases
+ if err = db.Connect(ctx, &src); err != nil {
+ msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+ Error(w, http.StatusBadRequest, msg, h.Logger)
+ return
+ }
+
+ postedRP := &chronograf.RetentionPolicy{}
+ if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil {
+ invalidJSON(w, h.Logger)
+ return
+ }
+ if err := ValidRetentionPolicyRequest(postedRP); err != nil {
+ invalidData(w, err, h.Logger)
+ return
+ }
+
+ dbID := httprouter.GetParamFromContext(ctx, "dbid")
+ rp, err := db.CreateRP(ctx, dbID, postedRP)
+ if err != nil {
+ Error(w, http.StatusBadRequest, err.Error(), h.Logger)
+ return
+ }
+ res := rpResponse{
+ Name: rp.Name,
+ Duration: rp.Duration,
+ Replication: rp.Replication,
+ ShardDuration: rp.ShardDuration,
+ Default: rp.Default,
+ }
+ res.WithLinks(srcID, dbID)
+ encodeJSON(w, http.StatusCreated, res, h.Logger)
+}
+
+// UpdateRetentionPolicy modifies an existing retention policy for a database
+func (h *Service) UpdateRetentionPolicy(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	srcID, err := paramID("id", r)
+	if err != nil {
+		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+		return
+	}
+
+	src, err := h.SourcesStore.Get(ctx, srcID)
+	if err != nil {
+		notFound(w, srcID, h.Logger)
+		return
+	}
+
+	db := h.Databases
+	if err = db.Connect(ctx, &src); err != nil {
+		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+		Error(w, http.StatusBadRequest, msg, h.Logger)
+		return
+	}
+
+	postedRP := &chronograf.RetentionPolicy{}
+	if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil {
+		invalidJSON(w, h.Logger)
+		return
+	}
+	if err := ValidRetentionPolicyRequest(postedRP); err != nil {
+		invalidData(w, err, h.Logger)
+		return
+	}
+
+	dbID := httprouter.GetParamFromContext(ctx, "dbid")
+	rpID := httprouter.GetParamFromContext(ctx, "rpid")
+	rp, err := db.UpdateRP(ctx, dbID, rpID, postedRP)
+
+	if err != nil {
+		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
+		return
+	}
+
+	res := rpResponse{
+		Name:          rp.Name,
+		Duration:      rp.Duration,
+		Replication:   rp.Replication,
+		ShardDuration: rp.ShardDuration,
+		Default:       rp.Default,
+	}
+	res.WithLinks(srcID, dbID)
+	encodeJSON(w, http.StatusOK, res, h.Logger)
+}
+
+// DropRetentionPolicy removes a retention policy from a database
+func (h *Service) DropRetentionPolicy(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ srcID, err := paramID("id", r)
+ if err != nil {
+ Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+ return
+ }
+
+ src, err := h.SourcesStore.Get(ctx, srcID)
+ if err != nil {
+ notFound(w, srcID, h.Logger)
+ return
+ }
+
+ db := h.Databases
+ if err = db.Connect(ctx, &src); err != nil {
+ msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
+ Error(w, http.StatusBadRequest, msg, h.Logger)
+ return
+ }
+
+ dbID := httprouter.GetParamFromContext(ctx, "dbid")
+ rpID := httprouter.GetParamFromContext(ctx, "rpid")
+ dropErr := db.DropRP(ctx, dbID, rpID)
+ if dropErr != nil {
+ Error(w, http.StatusBadRequest, dropErr.Error(), h.Logger)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// ValidDatabaseRequest checks if the database posted is valid
+func ValidDatabaseRequest(d *chronograf.Database) error {
+ if len(d.Name) == 0 {
+ return fmt.Errorf("name is required")
+ }
+ return nil
+}
+
+// ValidRetentionPolicyRequest checks if a retention policy is valid on POST
+func ValidRetentionPolicyRequest(rp *chronograf.RetentionPolicy) error {
+ if len(rp.Name) == 0 {
+ return fmt.Errorf("name is required")
+ }
+ if len(rp.Duration) == 0 {
+ return fmt.Errorf("duration is required")
+ }
+ if rp.Replication == 0 {
+ return fmt.Errorf("replication factor is invalid")
+ }
+ return nil
+}
diff --git a/server/databases_test.go b/server/databases_test.go
new file mode 100644
index 000000000..c17ee676a
--- /dev/null
+++ b/server/databases_test.go
@@ -0,0 +1,349 @@
+package server
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/influxdata/chronograf"
+)
+
+func TestService_GetDatabases(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.GetDatabases(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestService_NewDatabase(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.NewDatabase(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestService_DropDatabase(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.DropDatabase(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestService_RetentionPolicies(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.RetentionPolicies(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestService_NewRetentionPolicy(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.NewRetentionPolicy(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestService_UpdateRetentionPolicy(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.UpdateRetentionPolicy(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestService_DropRetentionPolicy(t *testing.T) {
+ type fields struct {
+ SourcesStore chronograf.SourcesStore
+ ServersStore chronograf.ServersStore
+ LayoutStore chronograf.LayoutStore
+ AlertRulesStore chronograf.AlertRulesStore
+ UsersStore chronograf.UsersStore
+ DashboardsStore chronograf.DashboardsStore
+ TimeSeriesClient TimeSeriesClient
+ Logger chronograf.Logger
+ UseAuth bool
+ Databases chronograf.Databases
+ }
+ type args struct {
+ w http.ResponseWriter
+ r *http.Request
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Service{
+ SourcesStore: tt.fields.SourcesStore,
+ ServersStore: tt.fields.ServersStore,
+ LayoutStore: tt.fields.LayoutStore,
+ AlertRulesStore: tt.fields.AlertRulesStore,
+ UsersStore: tt.fields.UsersStore,
+ DashboardsStore: tt.fields.DashboardsStore,
+ TimeSeriesClient: tt.fields.TimeSeriesClient,
+ Logger: tt.fields.Logger,
+ UseAuth: tt.fields.UseAuth,
+ Databases: tt.fields.Databases,
+ }
+ h.DropRetentionPolicy(tt.args.w, tt.args.r)
+ })
+ }
+}
+
+func TestValidDatabaseRequest(t *testing.T) {
+ type args struct {
+ d *chronograf.Database
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := ValidDatabaseRequest(tt.args.d); (err != nil) != tt.wantErr {
+ t.Errorf("ValidDatabaseRequest() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestValidRetentionPolicyRequest(t *testing.T) {
+ type args struct {
+ rp *chronograf.RetentionPolicy
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ // TODO: Add test cases.
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := ValidRetentionPolicyRequest(tt.args.rp); (err != nil) != tt.wantErr {
+ t.Errorf("ValidRetentionPolicyRequest() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/server/mux.go b/server/mux.go
index 55eecfc75..a996f446d 100644
--- a/server/mux.go
+++ b/server/mux.go
@@ -138,6 +138,19 @@ func NewMux(opts MuxOpts, service Service) http.Handler {
router.DELETE("/chronograf/v1/dashboards/:id/cells/:cid", service.RemoveDashboardCell)
router.PUT("/chronograf/v1/dashboards/:id/cells/:cid", service.ReplaceDashboardCell)
+ // Databases
+ router.GET("/chronograf/v1/sources/:id/dbs", service.GetDatabases)
+ router.POST("/chronograf/v1/sources/:id/dbs", service.NewDatabase)
+
+ router.DELETE("/chronograf/v1/sources/:id/dbs/:dbid", service.DropDatabase)
+
+ // Retention Policies
+ router.GET("/chronograf/v1/sources/:id/dbs/:dbid/rps", service.RetentionPolicies)
+ router.POST("/chronograf/v1/sources/:id/dbs/:dbid/rps", service.NewRetentionPolicy)
+
+ router.PUT("/chronograf/v1/sources/:id/dbs/:dbid/rps/:rpid", service.UpdateRetentionPolicy)
+ router.DELETE("/chronograf/v1/sources/:id/dbs/:dbid/rps/:rpid", service.DropRetentionPolicy)
+
var authRoutes AuthRoutes
var out http.Handler
diff --git a/server/server.go b/server/server.go
index b80e11c42..a19a0c077 100644
--- a/server/server.go
+++ b/server/server.go
@@ -22,6 +22,7 @@ import (
+ "github.com/influxdata/chronograf/influx"
 client "github.com/influxdata/usage-client/v1"
 flags "github.com/jessevdk/go-flags"
 "github.com/tylerb/graceful"
 )
var (
@@ -293,6 +294,7 @@ func openService(ctx context.Context, boltPath, cannedPath string, logger chrono
AlertRulesStore: db.AlertsStore,
Logger: logger,
UseAuth: useAuth,
+ Databases: &influx.Client{Logger: logger},
}
}
diff --git a/server/service.go b/server/service.go
index 6f44b9626..3da9f2d64 100644
--- a/server/service.go
+++ b/server/service.go
@@ -20,6 +20,7 @@ type Service struct {
TimeSeriesClient TimeSeriesClient
Logger chronograf.Logger
UseAuth bool
+ Databases chronograf.Databases
}
// TimeSeriesClient returns the correct client for a time series database.
diff --git a/server/sources.go b/server/sources.go
index 55c505229..d56a85ecd 100644
--- a/server/sources.go
+++ b/server/sources.go
@@ -18,6 +18,7 @@ type sourceLinks struct {
Permissions string `json:"permissions"` // URL for all allowed permissions for this source
Users string `json:"users"` // URL for all users associated with this source
Roles string `json:"roles,omitempty"` // URL for all users associated with this source
+ Databases string `json:"databases"` // URL for the databases contained within this source
}
type sourceResponse struct {
@@ -43,6 +44,7 @@ func newSourceResponse(src chronograf.Source) sourceResponse {
Kapacitors: fmt.Sprintf("%s/%d/kapacitors", httpAPISrcs, src.ID),
Permissions: fmt.Sprintf("%s/%d/permissions", httpAPISrcs, src.ID),
Users: fmt.Sprintf("%s/%d/users", httpAPISrcs, src.ID),
+ Databases: fmt.Sprintf("%s/%d/dbs", httpAPISrcs, src.ID),
},
}
diff --git a/server/sources_test.go b/server/sources_test.go
index 729d9a504..23d79a07e 100644
--- a/server/sources_test.go
+++ b/server/sources_test.go
@@ -30,6 +30,7 @@ func Test_newSourceResponse(t *testing.T) {
Kapacitors: "/chronograf/v1/sources/1/kapacitors",
Users: "/chronograf/v1/sources/1/users",
Permissions: "/chronograf/v1/sources/1/permissions",
+ Databases: "/chronograf/v1/sources/1/dbs",
},
},
},
@@ -50,6 +51,7 @@ func Test_newSourceResponse(t *testing.T) {
Kapacitors: "/chronograf/v1/sources/1/kapacitors",
Users: "/chronograf/v1/sources/1/users",
Permissions: "/chronograf/v1/sources/1/permissions",
+ Databases: "/chronograf/v1/sources/1/dbs",
},
},
},
diff --git a/server/swagger.json b/server/swagger.json
index 3ed4a9851..97965471b 100644
--- a/server/swagger.json
+++ b/server/swagger.json
@@ -769,6 +769,329 @@
}
}
},
+ "/sources/{id}/dbs/": {
+ "get": {
+ "tags": [
+ "databases"
+ ],
+ "summary": "Retrieve all databases for a source",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Listing of all databases for a source",
+ "schema": {
+ "$ref": "#/definitions/Databases"
+ }
+ },
+ "404": {
+ "description": "Data source id does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ },
+ "post": {
+ "tags": [
+ "databases"
+ ],
+ "summary": "Create new database for a source",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ },
+ {
+ "name": "database",
+ "in": "body",
+ "description": "Configuration options for a database",
+ "schema": {
+ "$ref": "#/definitions/Database"
+ },
+ "required": true
+ }
+ ],
+ "responses": {
+ "201": {
+ "description": "Database successfully created.",
+ "schema": {
+ "$ref": "#/definitions/Database"
+ }
+ },
+ "404": {
+ "description": "Data source id does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/sources/{id}/dbs/{db_id}": {
+ "delete": {
+ "tags": [
+ "databases"
+ ],
+ "summary": "Delete database for a source",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ },
+ {
+ "name": "db_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the database",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "Database has been deleted",
+ },
+ "404": {
+ "description": "Data source id does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/sources/{id}/dbs/{db_id}/rps": {
+ "get": {
+ "tags": [
+ "retention policies"
+ ],
+ "summary": "Retrieve all retention policies for a database",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ },
+ {
+ "name": "db_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the database",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Listing of all retention policies for a database",
+ "schema": {
+ "$ref": "#/definitions/RetentionPolicies"
+ }
+ },
+ "404": {
+ "description": "Specified retention policy does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ },
+ "post": {
+ "tags": [
+ "retention policies"
+ ],
+ "summary": "Create new retention policy for a database",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ },
+ {
+ "name": "db_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the database",
+ "required": true
+ },
+ {
+ "name": "rp",
+ "in": "body",
+ "description": "Configuration options for the retention policy",
+ "schema": {
+ "$ref": "#/definitions/RetentionPolicy"
+ },
+ "required": true
+ }
+ ],
+ "responses": {
+ "201": {
+ "description": "Retention Policy successfully created.",
+ "schema": {
+ "$ref": "#/definitions/RetentionPolicy"
+ }
+ },
+ "404": {
+ "description": "Data source id does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/sources/{id}/dbs/{db_id}/rps/{rp_id}": {
+ "patch": {
+ "tags": [
+ "retention policies"
+ ],
+ "summary": "Alter retention policy for a database",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ },
+ {
+ "name": "db_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the database",
+ "required": true
+ },
+ {
+ "name": "rp_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the retention policy",
+ "required": true
+ },
+ {
+ "name": "rp",
+ "in": "body",
+ "description": "Configuration options for the retention policy",
+ "schema": {
+ "$ref": "#/definitions/RetentionPolicy"
+ },
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Retention Policy was altered",
+ "schema": {
+ "$ref": "#/definitions/RetentionPolicy"
+ }
+ },
+ "404": {
+ "description": "Database or source does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ },
+ "delete": {
+ "tags": [
+ "retention policies"
+ ],
+ "summary": "Delete retention policy for a database",
+ "parameters": [
+ {
+ "name": "id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the data source",
+ "required": true
+ },
+ {
+ "name": "db_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the database",
+ "required": true
+ },
+ {
+ "name": "rp_id",
+ "in": "path",
+ "type": "string",
+ "description": "ID of the retention policy",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "Retention Policy has been deleted",
+ },
+ "404": {
+ "description": "Data source id does not exist.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ },
+ "default": {
+ "description": "A processing or an unexpected error.",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
"/sources/{id}/kapacitors": {
"get": {
"tags": [
@@ -1912,6 +2235,70 @@
}
},
"definitions": {
+ "Databases": {
+ "type": "object",
+ "required": [
+ "databases"
+ ],
+ "properties": {
+ "databases": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Database"
+ }
+ }
+ }
+ },
+ "Database": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "example": {
+ "name": "NOAA_water_database",
+ "duration": "3d",
+ "replication": 3,
+ "shardDuration": "3h",
+ "links": {
+ "self": "/chronograf/v1/sources/1/dbs/NOAA_water_database",
+ "rps": "/chronograf/v1/sources/1/dbs/NOAA_water_database/rps"
+ }
+ },
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The identifying name of the database"
+ },
+ "duration": {
+ "type": "string",
+ "description": "the duration of the default retention policy"
+ },
+ "replication": {
+ "type": "integer",
+ "format": "int32",
+ "description": "how many copies of the data are stored in the cluster"
+ },
+ "shardDuration": {
+ "type": "string",
+ "description": "the interval spanned by each shard group"
+ },
+ "links": {
+ "type": "object",
+ "properties": {
+ "self": {
+ "type": "string",
+ "description": "Self link mapping to this resource",
+ "format": "url"
+ },
+ "rps": {
+ "type": "string",
+ "description": "Link to retention policies for this database",
+ "format": "url"
+ }
+ }
+ }
+ }
+ },
"Kapacitors": {
"type": "object",
"required": [
@@ -2150,6 +2537,71 @@
}
}
},
+ "RetentionPolicies": {
+ "type": "object",
+ "required": [
+ "retentionPolicies"
+ ],
+ "properties": {
+ "retentionPolicies": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/RetentionPolicy"
+ }
+ }
+ }
+ },
+ "RetentionPolicy": {
+ "type": "object",
+ "required": [
+ "name",
+ "duration",
+ "replication"
+ ],
+ "example": {
+ "name": "weekly",
+ "duration": "7d",
+ "replication": 1,
+ "shardDuration": "7d",
+ "default": true,
+ "links": {
+ "self": "/chronograf/v1/sources/1/dbs/NOAA_water_database/rps/liquid"
+ }
+ },
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The identifying name of the retention policy"
+ },
+ "duration": {
+ "type": "string",
+ "description": "the duration of the retention policy"
+ },
+ "replication": {
+ "type": "integer",
+ "format": "int32",
+ "description": "how many copies of the data are stored in the cluster"
+ },
+ "shardDuration": {
+ "type": "string",
+ "description": "the interval spanned by each shard group"
+ },
+ "default": {
+ "type": "boolean",
+ "description": "Indicates whether this retention policy should be the default"
+ },
+ "links": {
+ "type": "object",
+ "properties": {
+ "self": {
+ "type": "string",
+ "description": "Self link mapping to this resource",
+ "format": "url"
+ }
+ }
+ }
+ }
+ },
"Rule": {
"type": "object",
"example": {
diff --git a/ui/spec/admin/reducers/adminSpec.js b/ui/spec/admin/reducers/adminSpec.js
index 9c293b3dd..5d8dec75e 100644
--- a/ui/spec/admin/reducers/adminSpec.js
+++ b/ui/spec/admin/reducers/adminSpec.js
@@ -3,18 +3,33 @@ import reducer from 'src/admin/reducers/admin'
import {
addUser,
addRole,
+ addDatabase,
+ addRetentionPolicy,
syncUser,
syncRole,
editUser,
editRole,
+ editDatabase,
+ editRetentionPolicy,
loadRoles,
loadPermissions,
deleteRole,
deleteUser,
+ removeDatabase,
+ removeRetentionPolicy,
filterRoles,
filterUsers,
+ addDatabaseDeleteCode,
+ removeDatabaseDeleteCode,
} from 'src/admin/actions'
+import {
+ NEW_DEFAULT_USER,
+ NEW_DEFAULT_ROLE,
+ NEW_DEFAULT_DATABASE,
+ NEW_EMPTY_RP,
+} from 'src/admin/constants'
+
let state = undefined
// Users
@@ -58,14 +73,6 @@ const u2 = {
links: {self: '/chronograf/v1/sources/1/users/zerocool'},
}
const users = [u1, u2]
-const newDefaultUser = {
- name: '',
- password: '',
- roles: [],
- permissions: [],
- links: {self: ''},
- isNew: true,
-}
// Roles
const r1 = {
@@ -103,20 +110,118 @@ const r2 = {
links: {self: '/chronograf/v1/sources/1/roles/l33tus3r'}
}
const roles = [r1, r2]
-const newDefaultRole = {
- name: '',
- users: [],
- permissions: [],
- links: {self: ''},
- isNew: true,
-}
// Permissions
const global = {scope: 'all', allowed: ['p1', 'p2']}
const scoped = {scope: 'db1', allowed: ['p1', 'p3']}
const permissions = [global, scoped]
+// Databases && Retention Policies
+const rp1 = {
+ name: 'rp1',
+ duration: '0',
+ replication: 2,
+ isDefault: true,
+ links: {self: '/chronograf/v1/sources/1/db/db1/rp/rp1'},
+}
+
+const db1 = {
+ name: 'db1',
+ links: {self: '/chronograf/v1/sources/1/db/db1'},
+ retentionPolicies: [rp1],
+}
+
+const db2 = {
+ name: 'db2',
+ links: {self: '/chronograf/v1/sources/1/db/db2'},
+ retentionPolicies: [],
+ deleteCode: 'DELETE',
+}
+
describe('Admin.Reducers', () => {
+ describe('Databases', () => {
+ const state = {databases: [db1, db2]}
+
+ it('can add a database', () => {
+ const actual = reducer(state, addDatabase())
+ const expected = [
+ {...NEW_DEFAULT_DATABASE, isEditing: true},
+ db1,
+ db2,
+ ]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+
+ it('can edit a database', () => {
+ const updates = {name: 'dbOne'}
+ const actual = reducer(state, editDatabase(db1, updates))
+ const expected = [{...db1, ...updates}, db2]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+
+ it('can remove a database', () => {
+ const actual = reducer(state, removeDatabase(db1))
+ const expected = [db2]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+
+ it('can add a database delete code', () => {
+ const actual = reducer(state, addDatabaseDeleteCode(db1))
+ const expected = [
+ {...db1, deleteCode: ''},
+ db2,
+ ]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+
+ it('can remove the delete code', () => {
+ const actual = reducer(state, removeDatabaseDeleteCode(db2))
+ delete db2.deleteCode
+ const expected = [
+ db1,
+ db2,
+ ]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+ })
+
+ describe('Retention Policies', () => {
+ const state = {databases: [db1]}
+
+ it('can add a retention policy', () => {
+ const actual = reducer(state, addRetentionPolicy(db1))
+ const expected = [
+ {...db1, retentionPolicies: [NEW_EMPTY_RP, rp1]},
+ ]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+
+ it('can remove a retention policy', () => {
+ const actual = reducer(state, removeRetentionPolicy(db1, rp1))
+ const expected = [
+ {...db1, retentionPolicies: []},
+ ]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+
+ it('can edit a retention policy', () => {
+ const updates = {name: 'rpOne', duration: '100y', replication: '42'}
+ const actual = reducer(state, editRetentionPolicy(db1, rp1, updates))
+ const expected = [
+ {...db1, retentionPolicies: [{...rp1, ...updates}]},
+ ]
+
+ expect(actual.databases).to.deep.equal(expected)
+ })
+ })
+
it('it can add a user', () => {
state = {
users: [
@@ -127,7 +232,7 @@ describe('Admin.Reducers', () => {
const actual = reducer(state, addUser())
const expected = {
users: [
- {...newDefaultUser, isEditing: true},
+ {...NEW_DEFAULT_USER, isEditing: true},
u1,
],
}
@@ -171,7 +276,7 @@ describe('Admin.Reducers', () => {
const actual = reducer(state, addRole())
const expected = {
roles: [
- {...newDefaultRole, isEditing: true},
+ {...NEW_DEFAULT_ROLE, isEditing: true},
r1,
],
}
diff --git a/ui/spec/utils/formattingSpec.js b/ui/spec/utils/formattingSpec.js
index 2f0a8040f..97f8d0ba5 100644
--- a/ui/spec/utils/formattingSpec.js
+++ b/ui/spec/utils/formattingSpec.js
@@ -25,7 +25,7 @@ describe('Formatting helpers', () => {
it("returns 'infinite' for a retention policy with a value of '0'", () => {
const actual = formatRPDuration('0')
- expect(actual).to.equal('infinite');
+ expect(actual).to.equal('∞');
});
it('correctly formats retention policy durations', () => {
diff --git a/ui/src/admin/actions/index.js b/ui/src/admin/actions/index.js
index a662ca8ac..c23b71ba4 100644
--- a/ui/src/admin/actions/index.js
+++ b/ui/src/admin/actions/index.js
@@ -2,15 +2,24 @@ import {
getUsers as getUsersAJAX,
getRoles as getRolesAJAX,
getPermissions as getPermissionsAJAX,
+ getDbsAndRps as getDbsAndRpsAJAX,
createUser as createUserAJAX,
createRole as createRoleAJAX,
+ createDatabase as createDatabaseAJAX,
+ createRetentionPolicy as createRetentionPolicyAJAX,
deleteUser as deleteUserAJAX,
deleteRole as deleteRoleAJAX,
+ deleteDatabase as deleteDatabaseAJAX,
+ deleteRetentionPolicy as deleteRetentionPolicyAJAX,
updateRole as updateRoleAJAX,
updateUser as updateUserAJAX,
+ updateRetentionPolicy as updateRetentionPolicyAJAX,
} from 'src/admin/apis'
-import {killQuery as killQueryProxy} from 'shared/apis/metaQuery'
+import {
+ killQuery as killQueryProxy,
+} from 'shared/apis/metaQuery'
+
import {publishNotification} from 'src/shared/actions/notifications';
import {ADMIN_NOTIFICATION_DELAY} from 'src/admin/constants'
@@ -35,6 +44,13 @@ export const loadPermissions = ({permissions}) => ({
},
})
+export const loadDatabases = (databases) => ({
+ type: 'LOAD_DATABASES',
+ payload: {
+ databases,
+ },
+})
+
export const addUser = () => ({
type: 'ADD_USER',
})
@@ -43,6 +59,17 @@ export const addRole = () => ({
type: 'ADD_ROLE',
})
+export const addDatabase = () => ({
+ type: 'ADD_DATABASE',
+})
+
+export const addRetentionPolicy = (database) => ({
+ type: 'ADD_RETENTION_POLICY',
+ payload: {
+ database,
+ },
+})
+
export const syncUser = (staleUser, syncedUser) => ({
type: 'SYNC_USER',
payload: {
@@ -59,6 +86,24 @@ export const syncRole = (staleRole, syncedRole) => ({
},
})
+export const syncDatabase = (stale, synced) => ({
+ type: 'SYNC_DATABASE',
+ payload: {
+ stale,
+ synced,
+ },
+})
+
+export const syncRetentionPolicy = (database, stale, synced) => ({
+ type: 'SYNC_RETENTION_POLICY',
+ payload: {
+ database,
+ stale,
+ synced,
+ },
+})
+
+
export const editUser = (user, updates) => ({
type: 'EDIT_USER',
payload: {
@@ -75,6 +120,14 @@ export const editRole = (role, updates) => ({
},
})
+export const editDatabase = (database, updates) => ({
+ type: 'EDIT_DATABASE',
+ payload: {
+ database,
+ updates,
+ },
+})
+
export const killQuery = (queryID) => ({
type: 'KILL_QUERY',
payload: {
@@ -96,6 +149,7 @@ export const loadQueries = (queries) => ({
},
})
+// TODO: change to 'removeUser'
export const deleteUser = (user) => ({
type: 'DELETE_USER',
payload: {
@@ -103,6 +157,7 @@ export const deleteUser = (user) => ({
},
})
+// TODO: change to 'removeRole'
export const deleteRole = (role) => ({
type: 'DELETE_ROLE',
payload: {
@@ -110,6 +165,21 @@ export const deleteRole = (role) => ({
},
})
+export const removeDatabase = (database) => ({
+ type: 'REMOVE_DATABASE',
+ payload: {
+ database,
+ },
+})
+
+export const removeRetentionPolicy = (database, retentionPolicy) => ({
+ type: 'REMOVE_RETENTION_POLICY',
+ payload: {
+ database,
+ retentionPolicy,
+ },
+})
+
export const filterUsers = (text) => ({
type: 'FILTER_USERS',
payload: {
@@ -124,6 +194,29 @@ export const filterRoles = (text) => ({
},
})
+export const addDatabaseDeleteCode = (database) => ({
+ type: 'ADD_DATABASE_DELETE_CODE',
+ payload: {
+ database,
+ },
+})
+
+export const removeDatabaseDeleteCode = (database) => ({
+ type: 'REMOVE_DATABASE_DELETE_CODE',
+ payload: {
+ database,
+ },
+})
+
+export const editRetentionPolicy = (database, retentionPolicy, updates) => ({
+ type: 'EDIT_RETENTION_POLICY',
+ payload: {
+ database,
+ retentionPolicy,
+ updates,
+ },
+})
+
// async actions
export const loadUsersAsync = (url) => async (dispatch) => {
const {data} = await getUsersAJAX(url)
@@ -140,6 +233,11 @@ export const loadPermissionsAsync = (url) => async (dispatch) => {
dispatch(loadPermissions(data))
}
+export const loadDBsAndRPsAsync = (url) => async (dispatch) => {
+ const {data: {databases}} = await getDbsAndRpsAJAX(url)
+ dispatch(loadDatabases(databases))
+}
+
export const createUserAsync = (url, user) => async (dispatch) => {
try {
const {data} = await createUserAJAX(url, user)
@@ -164,6 +262,41 @@ export const createRoleAsync = (url, role) => async (dispatch) => {
}
}
+export const createDatabaseAsync = (url, database) => async (dispatch) => {
+ try {
+ const {data} = await createDatabaseAJAX(url, database)
+ dispatch(syncDatabase(database, data))
+ dispatch(publishNotification('success', 'Database created successfully'))
+ } catch (error) {
+ // undo optimistic update
+ dispatch(publishNotification('error', `Failed to create database: ${error.data.message}`))
+ setTimeout(() => dispatch(removeDatabase(database)), ADMIN_NOTIFICATION_DELAY)
+ }
+}
+
+export const createRetentionPolicyAsync = (database, retentionPolicy) => async (dispatch) => {
+ try {
+ const {data} = await createRetentionPolicyAJAX(database.links.retentionPolicies, retentionPolicy)
+ dispatch(publishNotification('success', 'Retention policy created successfully'))
+ dispatch(syncRetentionPolicy(database, retentionPolicy, data))
+ } catch (error) {
+ // undo optimistic update
+ dispatch(publishNotification('error', `Failed to create retention policy: ${error.data.message}`))
+ setTimeout(() => dispatch(removeRetentionPolicy(database, retentionPolicy)), ADMIN_NOTIFICATION_DELAY)
+ }
+}
+
+export const updateRetentionPolicyAsync = (database, retentionPolicy, updates) => async (dispatch) => {
+ try {
+ dispatch(editRetentionPolicy(database, retentionPolicy, updates))
+ const {data} = await updateRetentionPolicyAJAX(retentionPolicy.links.self, updates)
+ dispatch(publishNotification('success', 'Retention policy updated successfully'))
+ dispatch(syncRetentionPolicy(database, retentionPolicy, data))
+ } catch (error) {
+ dispatch(publishNotification('error', `Failed to update retention policy: ${error.data.message}`))
+ }
+}
+
export const killQueryAsync = (source, queryID) => (dispatch) => {
// optimistic update
dispatch(killQuery(queryID))
@@ -189,6 +322,26 @@ export const deleteUserAsync = (user, addFlashMessage) => (dispatch) => {
deleteUserAJAX(user.links.self, addFlashMessage, user.name)
}
+export const deleteDatabaseAsync = (database) => async (dispatch) => {
+ dispatch(removeDatabase(database))
+ dispatch(publishNotification('success', 'Database deleted'))
+ try {
+ await deleteDatabaseAJAX(database.links.self)
+ } catch (error) {
+ dispatch(publishNotification('error', `Failed to delete database: ${error.data.message}`))
+ }
+}
+
+export const deleteRetentionPolicyAsync = (database, retentionPolicy) => async (dispatch) => {
+ dispatch(removeRetentionPolicy(database, retentionPolicy))
+ dispatch(publishNotification('success', `Retention policy ${retentionPolicy.name} deleted`))
+ try {
+ await deleteRetentionPolicyAJAX(retentionPolicy.links.self)
+ } catch (error) {
+ dispatch(publishNotification('error', `Failed to delete retentionPolicy: ${error.data.message}`))
+ }
+}
+
export const updateRoleUsersAsync = (role, users) => async (dispatch) => {
try {
const {data} = await updateRoleAJAX(role.links.self, users, role.permissions)
diff --git a/ui/src/admin/apis/index.js b/ui/src/admin/apis/index.js
index 6c7d747ae..7c411ba0f 100644
--- a/ui/src/admin/apis/index.js
+++ b/ui/src/admin/apis/index.js
@@ -36,6 +36,18 @@ export const getPermissions = async (url) => {
}
}
+export const getDbsAndRps = async (url) => {
+ try {
+ return await AJAX({
+ method: 'GET',
+ url,
+ })
+ } catch (error) {
+ console.error(error)
+ throw error
+ }
+}
+
export const createUser = async (url, user) => {
try {
return await AJAX({
@@ -60,6 +72,41 @@ export const createRole = async (url, role) => {
}
}
+export const createDatabase = async (url, database) => {
+ try {
+ return await AJAX({
+ method: 'POST',
+ url,
+ data: database,
+ })
+ } catch (error) {
+ throw error
+ }
+}
+
+export const createRetentionPolicy = async (url, retentionPolicy) => {
+ try {
+ return await AJAX({
+ method: 'POST',
+ url,
+ data: retentionPolicy,
+ })
+ } catch (error) {
+ throw error
+ }
+}
+
+export const deleteRetentionPolicy = async (url) => {
+ try {
+ return await AJAX({
+ method: 'DELETE',
+ url,
+ })
+ } catch (error) {
+ throw error
+ }
+}
+
export const deleteRole = async (url, addFlashMessage, rolename) => {
try {
const response = await AJAX({
@@ -100,6 +147,18 @@ export const deleteUser = async (url, addFlashMessage, username) => {
}
}
+export const deleteDatabase = async (url) => {
+ try {
+ return await AJAX({
+ method: 'DELETE',
+ url,
+ })
+ } catch (error) {
+ console.error(error)
+ throw error
+ }
+}
+
export const updateRole = async (url, users, permissions) => {
try {
return await AJAX({
@@ -131,3 +190,18 @@ export const updateUser = async (url, roles, permissions) => {
throw error
}
}
+
+export const updateRetentionPolicy = async (url, retentionPolicy) => {
+ try {
+ return await AJAX({
+ method: 'PUT',
+ url,
+ data: {
+ ...retentionPolicy,
+ },
+ })
+ } catch (error) {
+ console.error(error)
+ throw error
+ }
+}
diff --git a/ui/src/admin/components/AdminTabs.js b/ui/src/admin/components/AdminTabs.js
index da9e1f0d2..7c7e898dd 100644
--- a/ui/src/admin/components/AdminTabs.js
+++ b/ui/src/admin/components/AdminTabs.js
@@ -1,8 +1,9 @@
import React, {PropTypes} from 'react'
-import {Tab, Tabs, TabPanel, TabPanels, TabList} from 'src/shared/components/Tabs';
+import {Tab, Tabs, TabPanel, TabPanels, TabList} from 'src/shared/components/Tabs'
import UsersTable from 'src/admin/components/UsersTable'
import RolesTable from 'src/admin/components/RolesTable'
import QueriesPage from 'src/admin/containers/QueriesPage'
+import DatabaseManagerPage from 'src/admin/containers/DatabaseManagerPage'
const AdminTabs = ({
users,
@@ -29,6 +30,10 @@ const AdminTabs = ({
onUpdateUserPermissions,
}) => {
let tabs = [
+ {
+ type: 'DB Management',
+ component: (
Retention Policy | +Duration | + {isRFDisplayed ?Replication Factor | : null} ++ |
---|