Merge branch 'dbrp' into feature/db-manager

pull/1029/head
Jade McGough 2017-03-23 11:24:40 -07:00
commit baf7a938c4
10 changed files with 1069 additions and 4 deletions

View File

@ -316,6 +316,33 @@ type UsersStore interface {
Update(context.Context, *User) error Update(context.Context, *User) error
} }
// Database represents an InfluxDB database. Duration, Replication, and
// ShardDuration describe the default retention policy created alongside
// the database, when supplied.
type Database struct {
Name string `json:"name"` // a unique string identifier for the database
Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy)
Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy)
ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy)
}
// RetentionPolicy represents an InfluxDB retention policy belonging to a
// database.
type RetentionPolicy struct {
Name string `json:"name"` // a unique string identifier for the retention policy
Duration string `json:"duration,omitempty"` // the duration
Replication int32 `json:"replication,omitempty"` // the replication factor
ShardDuration string `json:"shardDuration,omitempty"` // the shard duration
Default bool `json:"default,omitempty"` // whether the RP should be the default
}
// Databases represents a service for managing databases and their
// retention policies on a time series source.
type Databases interface {
// AllDB lists all databases
AllDB(context.Context) ([]Database, error)
// Connect prepares the service to communicate with the given source.
Connect(context.Context, *Source) error
// CreateDB creates a database and returns its representation.
CreateDB(context.Context, *Database) (*Database, error)
// DropDB removes the database with the given name.
DropDB(context.Context, string) error
// AllRP lists the retention policies of the named database.
AllRP(context.Context, string) ([]RetentionPolicy, error)
// CreateRP creates a retention policy on the named database.
CreateRP(context.Context, string, *RetentionPolicy) (*RetentionPolicy, error)
// UpdateRP alters the retention policy (second string) on the named
// database (first string).
UpdateRP(context.Context, string, string, *RetentionPolicy) (*RetentionPolicy, error)
// DropRP removes the named retention policy from the named database.
DropRP(context.Context, string, string) error
}
// DashboardID is the dashboard ID // DashboardID is the dashboard ID
type DashboardID int type DashboardID int

157
influx/databases.go Normal file
View File

@ -0,0 +1,157 @@
package influx
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/influxdata/chronograf"
)
// AllDB returns every database reported by the connected InfluxDB source.
func (c *Client) AllDB(ctx context.Context) ([]chronograf.Database, error) {
	return c.showDatabases(ctx)
}
// CreateDB issues a CREATE DATABASE statement for db.Name and returns a
// Database describing the newly created database.
func (c *Client) CreateDB(ctx context.Context, db *chronograf.Database) (*chronograf.Database, error) {
	command := fmt.Sprintf(`CREATE DATABASE "%s"`, db.Name)
	if _, err := c.Query(ctx, chronograf.Query{Command: command}); err != nil {
		return nil, err
	}
	return &chronograf.Database{Name: db.Name}, nil
}
// DropDB removes the named database.
//
// Bug fix: the statement previously read just `DROP DATABASE` — InfluxQL
// requires the database name in the statement itself; the Query.DB field
// does not identify the target of a DROP.
func (c *Client) DropDB(ctx context.Context, database string) error {
	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`DROP DATABASE "%s"`, database),
		DB:      database,
	})
	if err != nil {
		return err
	}
	return nil
}
// AllRP lists the retention policies defined on the given database.
func (c *Client) AllRP(ctx context.Context, database string) ([]chronograf.RetentionPolicy, error) {
	return c.showRetentionPolicies(ctx, database)
}
// CreateRP creates a retention policy on the given database and returns a
// RetentionPolicy describing what was created.
//
// Bug fixes: InfluxQL requires the target database in an ON clause, a bare
// (unquoted) duration literal such as 7d, and an unquoted integer
// replication factor — the previous statement quoted both and used the %s
// verb on an int32 (which renders as %!s(int32=N)).
func (c *Client) CreateRP(ctx context.Context, database string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`CREATE RETENTION POLICY "%s" ON "%s" DURATION %s REPLICATION %d`, rp.Name, database, rp.Duration, rp.Replication),
		DB:      database,
	})
	if err != nil {
		return nil, err
	}
	res := &chronograf.RetentionPolicy{
		Name:        rp.Name,
		Duration:    rp.Duration,
		Replication: rp.Replication,
	}
	return res, nil
}
// UpdateRP alters the named retention policy on the given database. Only
// the fields set on rp are included in the ALTER statement.
//
// Bug fix: the statement previously started with a bare
// "ALTER RETENTION POLICY" — InfluxQL requires both the policy name and
// the ON <database> clause.
func (c *Client) UpdateRP(ctx context.Context, database string, name string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf(`ALTER RETENTION POLICY "%s" ON "%s"`, name, database))
	if len(rp.Duration) > 0 {
		buffer.WriteString(" DURATION " + rp.Duration)
	}
	if rp.Replication > 0 {
		buffer.WriteString(" REPLICATION " + fmt.Sprint(rp.Replication))
	}
	if len(rp.ShardDuration) > 0 {
		buffer.WriteString(" SHARD DURATION " + rp.ShardDuration)
	}
	if rp.Default {
		buffer.WriteString(" DEFAULT")
	}
	_, err := c.Query(ctx, chronograf.Query{
		Command: buffer.String(),
		DB:      database,
		RP:      name,
	})
	if err != nil {
		return nil, err
	}
	// TODO: use actual information here
	res := &chronograf.RetentionPolicy{
		Name: name,
	}
	return res, nil
}
// DropRP removes the named retention policy from the given database.
//
// Bug fix: the statement previously read just `DROP RETENTION POLICY` —
// InfluxQL requires the policy name and the ON <database> clause.
func (c *Client) DropRP(ctx context.Context, database string, rp string) error {
	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`DROP RETENTION POLICY "%s" ON "%s"`, rp, database),
		DB:      database,
		RP:      rp,
	})
	if err != nil {
		return err
	}
	return nil
}
// showDatabases runs SHOW DATABASES against the source and decodes the
// response into chronograf Databases.
func (c *Client) showDatabases(ctx context.Context) ([]chronograf.Database, error) {
	res, err := c.Query(ctx, chronograf.Query{Command: `SHOW DATABASES`})
	if err != nil {
		return nil, err
	}
	octets, err := res.MarshalJSON()
	if err != nil {
		return nil, err
	}
	var results showResults
	if err := json.Unmarshal(octets, &results); err != nil {
		return nil, err
	}
	return results.Databases(), nil
}
// showRetentionPolicies runs SHOW RETENTION POLICIES against the named
// database and decodes the response into chronograf RetentionPolicies.
//
// Idiom fix: the statement is constant, so the no-op fmt.Sprintf wrapper
// (flagged by staticcheck S1039) is dropped.
func (c *Client) showRetentionPolicies(ctx context.Context, name string) ([]chronograf.RetentionPolicy, error) {
	retentionPolicies, err := c.Query(ctx, chronograf.Query{
		Command: `SHOW RETENTION POLICIES`,
		DB:      name,
	})
	if err != nil {
		return nil, err
	}
	octets, err := retentionPolicies.MarshalJSON()
	if err != nil {
		return nil, err
	}
	results := showResults{}
	if err := json.Unmarshal(octets, &results); err != nil {
		return nil, err
	}
	return results.RetentionPolicies(), nil
}

View File

@ -15,6 +15,7 @@ import (
var _ chronograf.TimeSeries = &Client{} var _ chronograf.TimeSeries = &Client{}
var _ chronograf.TSDBStatus = &Client{} var _ chronograf.TSDBStatus = &Client{}
var _ chronograf.Databases = &Client{}
// Shared transports for all clients to prevent leaking connections // Shared transports for all clients to prevent leaking connections
var ( var (

View File

@ -75,6 +75,55 @@ func (r *showResults) Users() []chronograf.User {
return res return res
} }
// Databases converts SHOW DATABASES results to chronograf Databases,
// skipping any row whose first column is not a string.
func (r *showResults) Databases() []chronograf.Database {
	res := []chronograf.Database{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				name, ok := row[0].(string)
				if !ok {
					continue
				}
				res = append(res, chronograf.Database{Name: name})
			}
		}
	}
	return res
}
// RetentionPolicies converts SHOW RETENTION POLICIES results to
// chronograf RetentionPolicies. Rows whose columns do not have the
// expected types are skipped; each assertion is checked in column order
// before the next column is touched.
func (r *showResults) RetentionPolicies() []chronograf.RetentionPolicy {
	res := []chronograf.RetentionPolicy{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				name, ok := row[0].(string)
				if !ok {
					continue
				}
				duration, ok := row[1].(string)
				if !ok {
					continue
				}
				sduration, ok := row[2].(string)
				if !ok {
					continue
				}
				replication, ok := row[3].(float64)
				if !ok {
					continue
				}
				def, ok := row[4].(bool)
				if !ok {
					continue
				}
				res = append(res, chronograf.RetentionPolicy{
					Name:          name,
					Duration:      duration,
					ShardDuration: sduration,
					Replication:   int32(replication),
					Default:       def,
				})
			}
		}
	}
	return res
}
// Permissions converts SHOW GRANTS to chronograf.Permissions // Permissions converts SHOW GRANTS to chronograf.Permissions
func (r *showResults) Permissions() chronograf.Permissions { func (r *showResults) Permissions() chronograf.Permissions {
res := []chronograf.Permission{} res := []chronograf.Permission{}

View File

@ -4,9 +4,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"github.com/bouk/httprouter"
"github.com/influxdata/chronograf" "github.com/influxdata/chronograf"
) )
@ -130,7 +128,7 @@ func (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {
// ReplaceDashboard completely replaces a dashboard // ReplaceDashboard completely replaces a dashboard
func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) { func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
idParam, err := strconv.Atoi(httprouter.GetParamFromContext(ctx, "id")) idParam, err := paramID("id", r)
if err != nil { if err != nil {
msg := fmt.Sprintf("Could not parse dashboard ID: %s", err) msg := fmt.Sprintf("Could not parse dashboard ID: %s", err)
Error(w, http.StatusInternalServerError, msg, s.Logger) Error(w, http.StatusInternalServerError, msg, s.Logger)
@ -168,10 +166,11 @@ func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {
// UpdateDashboard completely updates either the dashboard name or the cells // UpdateDashboard completely updates either the dashboard name or the cells
func (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) { func (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
idParam, err := strconv.Atoi(httprouter.GetParamFromContext(ctx, "id")) idParam, err := paramID("id", r)
if err != nil { if err != nil {
msg := fmt.Sprintf("Could not parse dashboard ID: %s", err) msg := fmt.Sprintf("Could not parse dashboard ID: %s", err)
Error(w, http.StatusInternalServerError, msg, s.Logger) Error(w, http.StatusInternalServerError, msg, s.Logger)
return
} }
id := chronograf.DashboardID(idParam) id := chronograf.DashboardID(idParam)

364
server/databases.go Normal file
View File

@ -0,0 +1,364 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"github.com/bouk/httprouter"
"github.com/influxdata/chronograf"
)
// dbLinks are URI locations related to a database resource.
type dbLinks struct {
Self string `json:"self"` // Self link mapping to this resource
RPs string `json:"rps"` // URL for retention policies for this database
}
// dbResponse is the JSON shape of a single database returned by this API.
type dbResponse struct {
Name string `json:"name"` // a unique string identifier for the database
Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy)
Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy)
ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy)
Links dbLinks `json:"links"` // Links are URI locations related to the database
}
// dbsResponse wraps the list of databases for a GET response body.
type dbsResponse struct {
Databases []dbResponse `json:"databases"`
}
// rpLinks are URI locations related to a retention policy resource.
type rpLinks struct {
Self string `json:"self"` // Self link mapping to this resource
}
// rpResponse is the JSON shape of a single retention policy returned by
// this API.
type rpResponse struct {
Name string `json:"name"` // a unique string identifier for the retention policy
Duration string `json:"duration"` // the duration
Replication int32 `json:"replication"` // the replication factor
ShardDuration string `json:"shardDuration"` // the shard duration
Default bool `json:"default"` // whether the RP should be the default
Links rpLinks `json:"links"` // Links are URI locations related to the database
}
// rpsResponse wraps the list of retention policies for a GET response body.
type rpsResponse struct {
RetentionPolicies []rpResponse `json:"retentionPolicies"`
}
// GetDatabases queries the list of all databases for the source identified
// by the :id URL parameter.
func (h *Service) GetDatabases(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
src, err := h.SourcesStore.Get(ctx, srcID)
if err != nil {
notFound(w, srcID, h.Logger)
return
}
db := h.Databases
// Establish a connection to the source before issuing any queries.
if err = db.Connect(ctx, &src); err != nil {
msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
Error(w, http.StatusBadRequest, msg, h.Logger)
return
}
databases, err := db.AllDB(ctx)
if err != nil {
Error(w, http.StatusBadRequest, err.Error(), h.Logger)
return
}
// Only Name is populated here; Links and default-RP details are left
// zero-valued.
dbs := make([]dbResponse, len(databases))
for i, d := range databases {
dbs[i] = dbResponse{
Name: d.Name,
}
}
res := dbsResponse{
Databases: dbs,
}
encodeJSON(w, http.StatusOK, res, h.Logger)
}
// NewDatabase creates a database on the source identified by the :id URL
// parameter. The request body is a JSON Database; responds 201 with the
// created database on success.
func (h *Service) NewDatabase(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
src, err := h.SourcesStore.Get(ctx, srcID)
if err != nil {
notFound(w, srcID, h.Logger)
return
}
db := h.Databases
// Establish a connection to the source before issuing any queries.
if err = db.Connect(ctx, &src); err != nil {
msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
Error(w, http.StatusBadRequest, msg, h.Logger)
return
}
postedDB := &chronograf.Database{}
if err := json.NewDecoder(r.Body).Decode(postedDB); err != nil {
invalidJSON(w, h.Logger)
return
}
// Reject requests missing a database name before hitting the source.
if err := ValidDatabaseRequest(postedDB); err != nil {
invalidData(w, err, h.Logger)
return
}
database, err := db.CreateDB(ctx, postedDB)
if err != nil {
Error(w, http.StatusBadRequest, err.Error(), h.Logger)
return
}
res := dbResponse{Name: database.Name}
encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// DropDatabase removes the database named by the :dbid URL parameter from
// the source identified by :id, replying 204 No Content on success.
func (h *Service) DropDatabase(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}

	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}

	databases := h.Databases
	if err = databases.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}

	dbID := httprouter.GetParamFromContext(ctx, "dbid")
	if err := databases.DropDB(ctx, dbID); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// RetentionPolicies lists the retention policies of the database named by
// the :dbid URL parameter on the source identified by :id.
func (h *Service) RetentionPolicies(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
src, err := h.SourcesStore.Get(ctx, srcID)
if err != nil {
notFound(w, srcID, h.Logger)
return
}
db := h.Databases
// Establish a connection to the source before issuing any queries.
if err = db.Connect(ctx, &src); err != nil {
msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
Error(w, http.StatusBadRequest, msg, h.Logger)
return
}
dbID := httprouter.GetParamFromContext(ctx, "dbid")
allRP, err := db.AllRP(ctx, dbID)
if err != nil {
Error(w, http.StatusBadRequest, err.Error(), h.Logger)
return
}
// Links are left zero-valued; only the policy fields are populated.
rps := make([]rpResponse, len(allRP))
for i, rp := range allRP {
rps[i] = rpResponse{
Name: rp.Name,
Duration: rp.Duration,
Replication: rp.Replication,
ShardDuration: rp.ShardDuration,
Default: rp.Default,
}
}
res := rpsResponse{
RetentionPolicies: rps,
}
encodeJSON(w, http.StatusOK, res, h.Logger)
}
// NewRetentionPolicy creates a retention policy on the database named by
// the :dbid URL parameter of the source identified by :id, and responds
// 201 with the created policy.
func (h *Service) NewRetentionPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	db := h.Databases
	if err = db.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	postedRP := &chronograf.RetentionPolicy{}
	if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := ValidRetentionPolicyRequest(postedRP); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	dbID := httprouter.GetParamFromContext(ctx, "dbid")
	rp, err := db.CreateRP(ctx, dbID, postedRP)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// Bug fix: respond with a retention-policy payload (rpResponse), not a
	// database payload (dbResponse), so duration/replication/shardDuration
	// reach the client.
	res := rpResponse{
		Name:          rp.Name,
		Duration:      rp.Duration,
		Replication:   rp.Replication,
		ShardDuration: rp.ShardDuration,
		Default:       rp.Default,
	}
	encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// UpdateRetentionPolicy alters the retention policy named by the :rpid URL
// parameter on the database :dbid of the source identified by :id.
func (h *Service) UpdateRetentionPolicy(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
src, err := h.SourcesStore.Get(ctx, srcID)
if err != nil {
notFound(w, srcID, h.Logger)
return
}
db := h.Databases
// Establish a connection to the source before issuing any queries.
if err = db.Connect(ctx, &src); err != nil {
msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
Error(w, http.StatusBadRequest, msg, h.Logger)
return
}
postedRP := &chronograf.RetentionPolicy{}
if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil {
invalidJSON(w, h.Logger)
return
}
if err := ValidRetentionPolicyRequest(postedRP); err != nil {
invalidData(w, err, h.Logger)
return
}
dbID := httprouter.GetParamFromContext(ctx, "dbid")
rpID := httprouter.GetParamFromContext(ctx, "rpid")
rp, err := db.UpdateRP(ctx, dbID, rpID, postedRP)
if err != nil {
Error(w, http.StatusBadRequest, err.Error(), h.Logger)
return
}
// TODO: this needs to be the actual RP information
res := rpResponse{Name: rp.Name}
// NOTE(review): 201 Created for a PATCH is unusual; 200 OK is the
// conventional status for a successful update — confirm intent.
encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// DropRetentionPolicy removes the retention policy named by the :rpid URL
// parameter from the database :dbid of the source identified by :id,
// replying 204 No Content on success.
func (h *Service) DropRetentionPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}

	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}

	databases := h.Databases
	if err = databases.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}

	dbID := httprouter.GetParamFromContext(ctx, "dbid")
	rpID := httprouter.GetParamFromContext(ctx, "rpid")
	if err := databases.DropRP(ctx, dbID, rpID); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// ValidDatabaseRequest ensures the posted database carries a name.
func ValidDatabaseRequest(d *chronograf.Database) error {
	if d.Name == "" {
		return fmt.Errorf("name is required")
	}
	return nil
}
// ValidRetentionPolicyRequest ensures the posted retention policy carries
// a name, a duration, and a non-zero replication factor.
func ValidRetentionPolicyRequest(rp *chronograf.RetentionPolicy) error {
	switch {
	case rp.Name == "":
		return fmt.Errorf("name is required")
	case rp.Duration == "":
		return fmt.Errorf("duration is required")
	case rp.Replication == 0:
		return fmt.Errorf("replication factor is invalid")
	}
	return nil
}

View File

@ -131,6 +131,19 @@ func NewMux(opts MuxOpts, service Service) http.Handler {
router.PUT("/chronograf/v1/dashboards/:id", service.ReplaceDashboard) router.PUT("/chronograf/v1/dashboards/:id", service.ReplaceDashboard)
router.PATCH("/chronograf/v1/dashboards/:id", service.UpdateDashboard) router.PATCH("/chronograf/v1/dashboards/:id", service.UpdateDashboard)
// Databases
router.GET("/chronograf/v1/sources/:id/dbs", service.GetDatabases)
router.POST("/chronograf/v1/sources/:id/dbs", service.NewDatabase)
router.DELETE("/chronograf/v1/sources/:id/dbs/:dbid", service.DropDatabase)
// Retention Policies
router.GET("/chronograf/v1/sources/:id/dbs/:dbid/rps", service.RetentionPolicies)
router.POST("/chronograf/v1/sources/:id/dbs/:dbid/rps", service.NewRetentionPolicy)
router.PATCH("/chronograf/v1/sources/:id/dbs/:dbid/rps/:rpid", service.UpdateRetentionPolicy)
router.DELETE("/chronograf/v1/sources/:id/dbs/:dbid/rps/:rpid", service.DropRetentionPolicy)
var authRoutes AuthRoutes var authRoutes AuthRoutes
var out http.Handler var out http.Handler

View File

@ -21,6 +21,7 @@ import (
client "github.com/influxdata/usage-client/v1" client "github.com/influxdata/usage-client/v1"
flags "github.com/jessevdk/go-flags" flags "github.com/jessevdk/go-flags"
"github.com/tylerb/graceful" "github.com/tylerb/graceful"
"github.com/influxdata/chronograf/influx"
) )
var ( var (
@ -292,6 +293,7 @@ func openService(boltPath, cannedPath string, logger chronograf.Logger, useAuth
AlertRulesStore: db.AlertsStore, AlertRulesStore: db.AlertsStore,
Logger: logger, Logger: logger,
UseAuth: useAuth, UseAuth: useAuth,
Databases: &influx.Client{Logger: logger},
} }
} }

View File

@ -20,6 +20,7 @@ type Service struct {
TimeSeriesClient TimeSeriesClient TimeSeriesClient TimeSeriesClient
Logger chronograf.Logger Logger chronograf.Logger
UseAuth bool UseAuth bool
Databases chronograf.Databases
} }
// TimeSeriesClient returns the correct client for a time series database. // TimeSeriesClient returns the correct client for a time series database.

View File

@ -769,6 +769,329 @@
} }
} }
}, },
"/sources/{id}/dbs/": {
"get": {
"tags": [
"databases"
],
"summary": "Retrieve all databases for a source",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
}
],
"responses": {
"200": {
"description": "Listing of all databases for a source",
"schema": {
"$ref": "#/definitions/Databases"
}
},
"404": {
"description": "Data source id does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
},
"post": {
"tags": [
"databases"
],
"summary": "Create new database for a source",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
},
{
"name": "database",
"in": "body",
"description": "Configuration options for a database",
"schema": {
"$ref": "#/definitions/Database"
},
"required": true
}
],
"responses": {
"201": {
"description": "Database successfully created.",
"schema": {
"$ref": "#/definitions/Database"
}
},
"404": {
"description": "Data source id does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
}
},
"/sources/{id}/dbs/{db_id}": {
"delete": {
"tags": [
"databases"
],
"summary": "Delete database for a source",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
},
{
"name": "db_id",
"in": "path",
"type": "string",
"description": "ID of the database",
"required": true
}
],
"responses": {
"204": {
"description": "Database has been deleted",
},
"404": {
"description": "Data source id does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
}
},
"/sources/{id}/dbs/{db_id}/rps": {
"get": {
"tags": [
"retention policies"
],
"summary": "Retrieve all retention policies for a database",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
},
{
"name": "db_id",
"in": "path",
"type": "string",
"description": "ID of the database",
"required": true
}
],
"responses": {
"200": {
"description": "Listing of all retention policies for a database",
"schema": {
"$ref": "#/definitions/RetentionPolicies"
}
},
"404": {
"description": "Specified retention policy does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
},
"post": {
"tags": [
"retention policies"
],
"summary": "Create new retention policy for a database",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
},
{
"name": "db_id",
"in": "path",
"type": "string",
"description": "ID of the database",
"required": true
},
{
"name": "rp",
"in": "body",
"description": "Configuration options for the retention policy",
"schema": {
"$ref": "#/definitions/RetentionPolicy"
},
"required": true
}
],
"responses": {
"201": {
"description": "Retention Policy successfully created.",
"schema": {
"$ref": "#/definitions/RetentionPolicy"
}
},
"404": {
"description": "Data source id does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
}
},
"/sources/{id}/dbs/{db_id}/rps/{rp_id}": {
"patch": {
"tags": [
"retention policies"
],
"summary": "Alter retention policy for a database",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
},
{
"name": "db_id",
"in": "path",
"type": "string",
"description": "ID of the database",
"required": true
},
{
"name": "rp_id",
"in": "path",
"type": "string",
"description": "ID of the retention policy",
"required": true
},
{
"name": "rp",
"in": "body",
"description": "Configuration options for the retention policy",
"schema": {
"$ref": "#/definitions/RetentionPolicy"
},
"required": true
}
],
"responses": {
"200": {
"description": "Retention Policy was altered",
"schema": {
"$ref": "#/definitions/RetentionPolicy"
}
},
"404": {
"description": "Database or source does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
},
"delete": {
"tags": [
"retention policies"
],
"summary": "Delete retention policy for a database",
"parameters": [
{
"name": "id",
"in": "path",
"type": "string",
"description": "ID of the data source",
"required": true
},
{
"name": "db_id",
"in": "path",
"type": "string",
"description": "ID of the database",
"required": true
},
{
"name": "rp_id",
"in": "path",
"type": "string",
"description": "ID of the retention policy",
"required": true
}
],
"responses": {
"204": {
"description": "Retention Policy has been deleted",
},
"404": {
"description": "Data source id does not exist.",
"schema": {
"$ref": "#/definitions/Error"
}
},
"default": {
"description": "A processing or an unexpected error.",
"schema": {
"$ref": "#/definitions/Error"
}
}
}
}
},
"/sources/{id}/kapacitors": { "/sources/{id}/kapacitors": {
"get": { "get": {
"tags": [ "tags": [
@ -1912,6 +2235,70 @@
} }
}, },
"definitions": { "definitions": {
"Databases": {
"type": "object",
"required": [
"databases"
],
"properties": {
"databases": {
"type": "array",
"items": {
"$ref": "#/definitions/Database"
}
}
}
},
"Database": {
"type": "object",
"required": [
"name"
],
"example": {
"name": "NOAA_water_database",
"duration": "3d",
"replication": 3,
"shardDuration": "3h",
"links": {
"self": "/chronograf/v1/sources/1/dbs/NOAA_water_database",
"rps": "/chronograf/v1/sources/1/dbs/NOAA_water_database/rps"
}
},
"properties": {
"name": {
"type": "string",
"description": "The identifying name of the database",
},
"duration": {
"type": "string",
"description": "the duration of the default retention policy"
},
"replication": {
"type": "integer",
"format": "int32",
"description": "how many copies of the data are stored in the cluster"
},
"shardDuration": {
"type": "string",
"description": "the interval spanned by each shard group"
},
"links": {
"type": "object",
"properties": {
"self": {
"type": "string",
"description": "Self link mapping to this resource",
"format": "url"
},
"rps": {
"type": "string",
"description": "Link to retention policies for this database",
"format": "url"
}
}
}
}
},
"Kapacitors": { "Kapacitors": {
"type": "object", "type": "object",
"required": [ "required": [
@ -2150,6 +2537,71 @@
} }
} }
}, },
"RetentionPolicies": {
"type": "object",
"required": [
"retentionPolicies"
],
"properties": {
"retentionPolicies": {
"type": "array",
"items": {
"$ref": "#/definitions/RetentionPolicy"
}
}
}
},
"RetentionPolicy": {
"type": "object",
"required": [
"name",
"duration",
"replication"
],
"example": {
"name": "weekly",
"duration": "7d",
"replication": 1,
"shardDuration": "7d",
"default": true,
"links": {
"self": "/chronograf/v1/ousrces/1/dbs/NOAA_water_database/rps/liquid"
}
},
"properties": {
"name": {
"type": "string",
"description": "The identifying name of the retention policy",
},
"duration": {
"type": "string",
"description": "the duration of the retention policy"
},
"replication": {
"type": "integer",
"format": "int32",
"description": "how many copies of the data are stored in the cluster"
},
"shardDuration": {
"type": "string",
"description": "the interval spanned by each shard group"
},
"default": {
"type": "boolean",
"description": "Indicates whether this retention policy should be the default"
},
"links": {
"type": "object",
"properties": {
"self": {
"type": "string",
"description": "Self link mapping to this resource",
"format": "url"
}
}
}
}
},
"Rule": { "Rule": {
"type": "object", "type": "object",
"example": { "example": {