chore: delete the rest of chronograf (#21998)

* chore: deleted the rest of chronograf

* chore: tidy
pull/22023/head
William Baker 2021-08-02 09:02:54 -06:00 committed by GitHub
parent 3c1d841df6
commit 3f7b996a10
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 2 additions and 7761 deletions

View File

@ -1,827 +0,0 @@
package chronograf
import (
"context"
"io"
"net/http"
"time"
)
// General errors returned across chronograf stores and services.
const (
	ErrUpstreamTimeout                 = Error("request to backend timed out")
	ErrSourceNotFound                  = Error("source not found")
	ErrServerNotFound                  = Error("server not found")
	ErrLayoutNotFound                  = Error("layout not found")
	ErrDashboardNotFound               = Error("dashboard not found")
	ErrUserNotFound                    = Error("user not found")
	ErrLayoutInvalid                   = Error("layout is invalid")
	ErrDashboardInvalid                = Error("dashboard is invalid")
	ErrSourceInvalid                   = Error("source is invalid")
	ErrServerInvalid                   = Error("server is invalid")
	ErrAlertNotFound                   = Error("alert not found")
	ErrAuthentication                  = Error("user not authenticated")
	ErrUninitialized                   = Error("client uninitialized. Call Open() method")
	ErrInvalidAxis                     = Error("Unexpected axis in cell. Valid axes are 'x', 'y', and 'y2'")
	ErrInvalidColorType                = Error("Invalid color type. Valid color types are 'min', 'max', 'threshold', 'text', and 'background'")
	ErrInvalidColor                    = Error("Invalid color. Accepted color format is #RRGGBB")
	ErrUserAlreadyExists               = Error("user already exists")
	ErrOrganizationNotFound            = Error("organization not found")
	ErrMappingNotFound                 = Error("mapping not found")
	ErrOrganizationAlreadyExists       = Error("organization already exists")
	ErrCannotDeleteDefaultOrganization = Error("cannot delete default organization")
	ErrConfigNotFound                  = Error("cannot find configuration")
	ErrAnnotationNotFound              = Error("annotation not found")
	// Fixed: the next two messages previously ended with a stray
	// trailing apostrophe ("... cannot be empty'").
	ErrInvalidCellOptionsText     = Error("invalid text wrapping option. Valid wrappings are 'truncate', 'wrap', and 'single line'")
	ErrInvalidCellOptionsSort     = Error("cell options sortby cannot be empty")
	ErrInvalidCellOptionsColumns  = Error("cell options columns cannot be empty")
	ErrOrganizationConfigNotFound = Error("could not find organization config")
)

// Error is a domain error encountered while processing chronograf requests.
// Declaring it as a string type lets the sentinel errors above be untyped
// constants that are comparable with ==.
type Error string

// Error returns the message text, satisfying the built-in error interface.
func (e Error) Error() string {
	return string(e)
}
// Logger represents an abstracted structured logging implementation. It
// provides methods to trigger log messages at various alert levels and a
// WithField method to set keys for a structured log message.
type Logger interface {
	Debug(...interface{})
	Info(...interface{})
	Error(...interface{})
	WithField(string, interface{}) Logger

	// Writer exposes the logger as an io.Writer. The returned writer is
	// the end of an io.Pipe and it is the caller's responsibility to
	// close it.
	Writer() *io.PipeWriter
}

// NoopLogger is a chronograf logger that discards every message.
type NoopLogger struct{}

// Debug discards its arguments.
func (*NoopLogger) Debug(...interface{}) {}

// Info discards its arguments.
func (*NoopLogger) Info(...interface{}) {}

// Error discards its arguments.
func (*NoopLogger) Error(...interface{}) {}

// WithField ignores the key/value pair and returns the receiver unchanged.
func (n *NoopLogger) WithField(string, interface{}) Logger { return n }

// Writer always returns nil; NoopLogger produces no output stream.
func (*NoopLogger) Writer() *io.PipeWriter { return nil }
// Router is an abstracted Router based on the API provided by the
// julienschmidt/httprouter package.
type Router interface {
http.Handler
// GET, PATCH, POST, DELETE, and PUT register a handler for the
// corresponding HTTP method at the given path pattern.
GET(string, http.HandlerFunc)
PATCH(string, http.HandlerFunc)
POST(string, http.HandlerFunc)
DELETE(string, http.HandlerFunc)
PUT(string, http.HandlerFunc)
// Handler registers an http.Handler for an arbitrary method (first
// argument) at the given path pattern (second argument).
Handler(string, string, http.Handler)
}
// Assets returns a handler to serve the website.
type Assets interface {
// Handler serves the static frontend assets.
Handler() http.Handler
}
// Supported time-series databases
const (
// InfluxDB is the open-source time-series database
InfluxDB = "influx"
// InfluxEnterprise is the clustered HA time-series database
InfluxEnterprise = "influx-enterprise"
// InfluxRelay is the basic HA layer over InfluxDB
InfluxRelay = "influx-relay"
)
// TSDBStatus represents the current status of a time series database
type TSDBStatus interface {
// Connect will connect to the time series using the information in `Source`.
Connect(ctx context.Context, src *Source) error
// Ping returns version and TSDB type of time series database if reachable.
Ping(context.Context) error
// Version returns the version of the TSDB database
Version(context.Context) (string, error)
// Type returns the type of the TSDB database
Type(context.Context) (string, error)
}
// Point is a field set in a series
type Point struct {
Database string // Database the point is written to
RetentionPolicy string // RetentionPolicy within the database
Measurement string // Measurement the point belongs to
Time int64 // Time is the point's timestamp; unit not specified here — presumably ns since epoch, verify with the writer
Tags map[string]string // Tags are the indexed key/value pairs
Fields map[string]interface{} // Fields are the measured values
}
// TimeSeries represents a queryable time series database.
type TimeSeries interface {
// Connect will connect to the time series using the information in `Source`.
Connect(context.Context, *Source) error
// Query retrieves time series data from the database.
Query(context.Context, Query) (Response, error)
// Write records points into a series
Write(context.Context, []Point) error
// Users returns the UsersStore of user accounts within the TimeSeries database
Users(context.Context) UsersStore
// Permissions returns all valid named permissions in this database
Permissions(context.Context) Permissions
// Roles returns the RolesStore associated with this TimeSeries database
Roles(context.Context) (RolesStore, error)
}
// Role is a restricted set of permissions assigned to a set of users.
type Role struct {
Name string `json:"name"`
Permissions Permissions `json:"permissions,omitempty"`
Users []User `json:"users,omitempty"`
Organization string `json:"organization,omitempty"` // Organization is the organization ID that the role belongs to
}
// RolesStore is the Storage and retrieval of authentication information
type RolesStore interface {
// All lists all roles from the RolesStore
All(context.Context) ([]Role, error)
// Add creates a new Role in the RolesStore
Add(context.Context, *Role) (*Role, error)
// Delete the Role from the RolesStore
Delete(context.Context, *Role) error
// Get retrieves a role if name exists.
Get(ctx context.Context, name string) (*Role, error)
// Update the roles' users or permissions
Update(context.Context, *Role) error
}
// Range represents an upper and lower bound for data
type Range struct {
Upper int64 `json:"upper"` // Upper is the upper bound
Lower int64 `json:"lower"` // Lower is the lower bound
}
// TemplateValue is a value used to replace a template in an InfluxQL query
type TemplateValue struct {
Value string `json:"value"` // Value is the specific value used to replace a template in an InfluxQL query
Type string `json:"type"` // Type can be tagKey, tagValue, fieldKey, csv, map, measurement, database, constant, influxql
Selected bool `json:"selected"` // Selected states that this variable has been picked to use for replacement
Key string `json:"key,omitempty"` // Key is the key for the Value if the Template Type is 'map'
}
// TemplateVar is a named variable within an InfluxQL query to be replaced with Values
type TemplateVar struct {
Var string `json:"tempVar"` // Var is the string to replace within InfluxQL
Values []TemplateValue `json:"values"` // Values are the replacement values within InfluxQL
}
// TemplateID is the unique ID used to identify a template
type TemplateID string
// Template represents a series of choices to replace TemplateVars within InfluxQL
type Template struct {
TemplateVar
ID TemplateID `json:"id"` // ID is the unique ID associated with this template
Type string `json:"type"` // Type can be fieldKeys, tagKeys, tagValues, csv, constant, measurements, databases, map, influxql, text
Label string `json:"label"` // Label is a user-facing description of the Template
Query *TemplateQuery `json:"query,omitempty"` // Query is used to generate the choices for a template
}
// Query retrieves a Response from a TimeSeries.
type Query struct {
Command string `json:"query"` // Command is the query itself
DB string `json:"db,omitempty"` // DB is optional and if empty will not be used.
RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used.
Epoch string `json:"epoch,omitempty"` // Epoch is the time format for the return results
Wheres []string `json:"wheres,omitempty"` // Wheres restricts the query to certain attributes
GroupBys []string `json:"groupbys,omitempty"` // GroupBys collate the query by these tags
Label string `json:"label,omitempty"` // Label is the Y-Axis label for the data
Range *Range `json:"range,omitempty"` // Range is the default Y-Axis range for the data
}
// DashboardQuery includes state for the query builder. This is a transition
// struct while we move to the full InfluxQL AST
type DashboardQuery struct {
Command string `json:"query"` // Command is the query itself
Label string `json:"label,omitempty"` // Label is the Y-Axis label for the data
Range *Range `json:"range,omitempty"` // Range is the default Y-Axis range for the data
QueryConfig QueryConfig `json:"queryConfig,omitempty"` // QueryConfig represents the query state that is understood by the data explorer
Source string `json:"source"` // Source is the optional URI to the data source for this queryConfig
Shifts []TimeShift `json:"-"` // Shifts represents shifts to apply to an influxql query's time range. Clients expect the shift to be in the generated QueryConfig
// This was added after this code was brought over to influxdb.
Type string `json:"type,omitempty"`
}
// TemplateQuery is used to retrieve choices for template replacement
type TemplateQuery struct {
Command string `json:"influxql"` // Command is the query itself
DB string `json:"db,omitempty"` // DB is optional and if empty will not be used.
RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used.
Measurement string `json:"measurement"` // Measurement is the optionally selected measurement for the query
TagKey string `json:"tagKey"` // TagKey is the optionally selected tag key for the query
FieldKey string `json:"fieldKey"` // FieldKey is the optionally selected field key for the query
}
// Response is the result of a query against a TimeSeries
type Response interface {
// MarshalJSON encodes the query result as JSON.
MarshalJSON() ([]byte, error)
}
// Source is connection information to a time-series data store.
type Source struct {
ID int `json:"id,string"` // ID is the unique ID of the source
Name string `json:"name"` // Name is the user-defined name for the source
Type string `json:"type,omitempty"` // Type specifies which kinds of source (enterprise vs oss)
Username string `json:"username,omitempty"` // Username is the username to connect to the source
Password string `json:"password,omitempty"` // Password is in CLEARTEXT
SharedSecret string `json:"sharedSecret,omitempty"` // SharedSecret is the optional signing secret for Influx JWT authorization
URL string `json:"url"` // URL is the connection URL for the source
MetaURL string `json:"metaUrl,omitempty"` // MetaURL is the url for the meta node
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` // InsecureSkipVerify as true means any certificate presented by the source is accepted.
Default bool `json:"default"` // Default specifies the default source for the application
Telegraf string `json:"telegraf"` // Telegraf is the db telegraf is written to. By default it is "telegraf"
Organization string `json:"organization"` // Organization is the organization ID that resource belongs to
Role string `json:"role,omitempty"` // Not Currently Used. Role is the name of the minimum role that a user must possess to access the resource.
DefaultRP string `json:"defaultRP"` // DefaultRP is the default retention policy used in database queries to this source
}
// SourcesStore stores connection information for a `TimeSeries`
type SourcesStore interface {
// All returns all sources in the store
All(context.Context) ([]Source, error)
// Add creates a new source in the SourcesStore and returns Source with ID
Add(context.Context, Source) (Source, error)
// Delete the Source from the store
Delete(context.Context, Source) error
// Get retrieves Source if `ID` exists
Get(ctx context.Context, ID int) (Source, error)
// Update the Source in the store.
Update(context.Context, Source) error
}
// DBRP represents a database and retention policy for a time series source
type DBRP struct {
DB string `json:"db"` // DB is the database name
RP string `json:"rp"` // RP is the retention policy name
}
// AlertRule represents rules for building a tickscript alerting task
type AlertRule struct {
ID string `json:"id,omitempty"` // ID is the unique ID of the alert
TICKScript TICKScript `json:"tickscript"` // TICKScript is the raw tickscript associated with this Alert
Query *QueryConfig `json:"query"` // Query is the filter of data for the alert.
Every string `json:"every"` // Every how often to check for the alerting criteria
AlertNodes AlertNodes `json:"alertNodes"` // AlertNodes defines the destinations for the alert
Message string `json:"message"` // Message included with alert
Details string `json:"details"` // Details is generally used for the Email alert. If empty will not be added.
Trigger string `json:"trigger"` // Trigger is a type that defines when to trigger the alert
TriggerValues TriggerValues `json:"values"` // Defines the values that cause the alert to trigger
Name string `json:"name"` // Name is the user-defined name for the alert
Type string `json:"type"` // Represents the task type where stream is data streamed to kapacitor and batch is queried by kapacitor
DBRPs []DBRP `json:"dbrps"` // List of database retention policy pairs the task is allowed to access
Status string `json:"status"` // Represents if this rule is enabled or disabled in kapacitor
Executing bool `json:"executing"` // Whether the task is currently executing
Error string `json:"error"` // Any error encountered when kapacitor executes the task
Created time.Time `json:"created"` // Date the task was first created
Modified time.Time `json:"modified"` // Date the task was last modified
LastEnabled time.Time `json:"last-enabled,omitempty"` // Date the task was last set to status enabled
}
// TICKScript is a raw tickscript task to be used by kapacitor
type TICKScript string
// Ticker generates tickscript tasks for kapacitor
type Ticker interface {
// Generate will create the tickscript to be used as a kapacitor task
Generate(AlertRule) (TICKScript, error)
}
// TriggerValues specifies the alerting logic for a specific trigger type
type TriggerValues struct {
Change string `json:"change,omitempty"` // Change specifies if the change is a percent or absolute
Period string `json:"period,omitempty"` // Period length of time before deadman is alerted
Shift string `json:"shift,omitempty"` // Shift is the amount of time to look into the past for the alert to compare to the present
Operator string `json:"operator,omitempty"` // Operator for alert comparison
Value string `json:"value,omitempty"` // Value is the boundary value when alert goes critical
RangeValue string `json:"rangeValue"` // RangeValue is an optional value for range comparisons
}
// Field represents influxql fields and functions from the UI
type Field struct {
Value interface{} `json:"value"` // Value is the field name, function name, or literal; interpretation depends on Type — verify with the UI
Type string `json:"type"`
Alias string `json:"alias"` // Alias is the display alias for the field
Args []Field `json:"args,omitempty"` // Args are nested arguments when Value is a function
}
// GroupBy represents influxql group by tags from the UI
type GroupBy struct {
Time string `json:"time"` // Time is the group-by time interval
Tags []string `json:"tags"` // Tags are the group-by tag keys
}
// DurationRange represents the lower and upper durations of the query config
type DurationRange struct {
Upper string `json:"upper"` // Upper is the upper bound duration
Lower string `json:"lower"` // Lower is the lower bound duration
}
// TimeShift represents a shift to apply to an influxql query's time range
type TimeShift struct {
Label string `json:"label"` // Label user facing description
Unit string `json:"unit"` // Unit influxql time unit representation i.e. ms, s, m, h, d
Quantity string `json:"quantity"` // Quantity number of units
}
// QueryConfig represents UI query from the data explorer
type QueryConfig struct {
ID string `json:"id,omitempty"`
Database string `json:"database"`
Measurement string `json:"measurement"`
RetentionPolicy string `json:"retentionPolicy"`
Fields []Field `json:"fields"`
Tags map[string][]string `json:"tags"`
GroupBy GroupBy `json:"groupBy"`
AreTagsAccepted bool `json:"areTagsAccepted"`
Fill string `json:"fill,omitempty"`
RawText *string `json:"rawText"`
Range *DurationRange `json:"range"`
Shifts []TimeShift `json:"shifts"`
}
// KapacitorNode adds arguments and properties to an alert
type KapacitorNode struct {
Name string `json:"name"`
Args []string `json:"args"`
Properties []KapacitorProperty `json:"properties"`
// In the future we could add chaining methods here.
}
// KapacitorProperty modifies the node they are called on
type KapacitorProperty struct {
Name string `json:"name"`
Args []string `json:"args"`
}
// Server represents a proxy connection to an HTTP server
type Server struct {
ID int `json:"id,string"` // ID is the unique ID of the server
SrcID int `json:"srcId,string"` // SrcID of the data source
Name string `json:"name"` // Name is the user-defined name for the server
Username string `json:"username"` // Username is the username to connect to the server
Password string `json:"password"` // Password is in CLEARTEXT
URL string `json:"url"` // URL is the connection URL for the server
InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the server is accepted.
Active bool `json:"active"` // Is this the active server for the source?
Organization string `json:"organization"` // Organization is the organization ID that resource belongs to
Type string `json:"type"` // Type is the kind of service (e.g. kapacitor or flux)
Metadata map[string]interface{} `json:"metadata"` // Metadata is any other data that the frontend wants to store about this service
}
// ServersStore stores connection information for a `Server`
type ServersStore interface {
// All returns all servers in the store
All(context.Context) ([]Server, error)
// Add creates a new Server in the ServersStore and returns Server with ID
Add(context.Context, Server) (Server, error)
// Delete the Server from the store
Delete(context.Context, Server) error
// Get retrieves Server if `ID` exists
Get(ctx context.Context, ID int) (Server, error)
// Update the Server in the store.
Update(context.Context, Server) error
}
// ID creates unique ID strings
type ID interface {
// Generate creates a unique ID string
Generate() (string, error)
}
const (
// AllScope grants permission for all databases.
AllScope Scope = "all"
// DBScope grants permissions for a specific database
DBScope Scope = "database"
)
// Permission is a specific allowance for User or Role bound to a
// scope of the data source
type Permission struct {
Scope Scope `json:"scope"`
Name string `json:"name,omitempty"`
Allowed Allowances `json:"allowed"`
}
// Permissions represent the entire set of permissions a User or Role may have
type Permissions []Permission
// Allowances defines what actions a user can have on a scoped permission
type Allowances []string
// Scope defines the location of access of a permission
type Scope string
// User represents an authenticated user.
type User struct {
ID uint64 `json:"id,string,omitempty"`
Name string `json:"name"`
Passwd string `json:"password,omitempty"`
Permissions Permissions `json:"permissions,omitempty"`
Roles []Role `json:"roles"`
Provider string `json:"provider,omitempty"` // Provider is the OAuth provider that authenticated the user
Scheme string `json:"scheme,omitempty"` // Scheme is the authentication scheme, e.g. oauth2 — verify with auth callers
SuperAdmin bool `json:"superAdmin,omitempty"`
}
// UserQuery represents the attributes that a user may be retrieved by.
// It is predominantly used in the UsersStore.Get method.
//
// It is expected that only one of ID or Name, Provider, and Scheme will be
// specified, but if all are provided, UserStores should prefer ID.
type UserQuery struct {
ID *uint64
Name *string
Provider *string
Scheme *string
}
// UsersStore is the Storage and retrieval of authentication information
//
// While not necessary for the app to function correctly, it is
// expected that Implementors of the UsersStore will take
// care to guarantee that the combination of a users Name, Provider,
// and Scheme are unique.
type UsersStore interface {
// All lists all users from the UsersStore
All(context.Context) ([]User, error)
// Create a new User in the UsersStore
Add(context.Context, *User) (*User, error)
// Delete the User from the UsersStore
Delete(context.Context, *User) error
// Get retrieves a user if name exists.
Get(ctx context.Context, q UserQuery) (*User, error)
// Update the user's permissions or roles
Update(context.Context, *User) error
// Num returns the number of users in the UsersStore
Num(context.Context) (int, error)
}
// Database represents a database in a time series source
type Database struct {
Name string `json:"name"` // a unique string identifier for the database
Duration string `json:"duration,omitempty"` // the duration (when creating a default retention policy)
Replication int32 `json:"replication,omitempty"` // the replication factor (when creating a default retention policy)
ShardDuration string `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy)
}
// RetentionPolicy represents a retention policy in a time series source
type RetentionPolicy struct {
Name string `json:"name"` // a unique string identifier for the retention policy
Duration string `json:"duration,omitempty"` // the duration
Replication int32 `json:"replication,omitempty"` // the replication factor
ShardDuration string `json:"shardDuration,omitempty"` // the shard duration
Default bool `json:"isDefault,omitempty"` // whether the RP should be the default
}
// Measurement represents a measurement in a time series source
type Measurement struct {
Name string `json:"name"` // a unique string identifier for the measurement
}
// Databases represents the databases in a time series source
type Databases interface {
// AllDB lists all databases in the current data source
AllDB(context.Context) ([]Database, error)
// Connect connects to a database in the current data source
Connect(context.Context, *Source) error
// CreateDB creates a database in the current data source
CreateDB(context.Context, *Database) (*Database, error)
// DropDB drops a database in the current data source
DropDB(context.Context, string) error
// AllRP lists all retention policies in the current data source
AllRP(context.Context, string) ([]RetentionPolicy, error)
// CreateRP creates a retention policy in the current data source
CreateRP(context.Context, string, *RetentionPolicy) (*RetentionPolicy, error)
// UpdateRP updates a retention policy in the current data source
UpdateRP(context.Context, string, string, *RetentionPolicy) (*RetentionPolicy, error)
// DropRP drops a retention policy in the current data source
DropRP(context.Context, string, string) error
// GetMeasurements lists measurements in the current data source
GetMeasurements(ctx context.Context, db string, limit, offset int) ([]Measurement, error)
}
// Annotation represents a time-based metadata associated with a source
type Annotation struct {
ID string // ID is the unique annotation identifier
StartTime time.Time // StartTime starts the annotation
EndTime time.Time // EndTime ends the annotation
Text string // Text is the associated user-facing text describing the annotation
Type string // Type describes the kind of annotation
}
// AnnotationStore represents storage and retrieval of annotations
type AnnotationStore interface {
All(ctx context.Context, start, stop time.Time) ([]Annotation, error) // All lists all Annotations between start and stop
Add(context.Context, *Annotation) (*Annotation, error) // Add creates a new annotation in the store
Delete(ctx context.Context, id string) error // Delete removes the annotation from the store
Get(ctx context.Context, id string) (*Annotation, error) // Get retrieves an annotation
Update(context.Context, *Annotation) error // Update replaces annotation
}
// DashboardID is the dashboard ID
type DashboardID int
// Dashboard represents all visual and query data for a dashboard
type Dashboard struct {
ID DashboardID `json:"id"` // ID is the unique identifier of the dashboard
Cells []DashboardCell `json:"cells"` // Cells are the visual and query cells of the dashboard
Templates []Template `json:"templates"` // Templates are the template variables available to the dashboard's queries
Name string `json:"name"` // Name is the user-defined name of the dashboard
Organization string `json:"organization"` // Organization is the organization ID that resource belongs to
}
// Axis represents the visible extents of a visualization
type Axis struct {
Bounds []string `json:"bounds"` // bounds are an arbitrary list of client-defined strings that specify the viewport for a cell
LegacyBounds [2]int64 `json:"-"` // legacy bounds are for testing a migration from an earlier version of axis
Label string `json:"label"` // label is a description of this Axis
Prefix string `json:"prefix"` // Prefix represents a label prefix for formatting axis values
Suffix string `json:"suffix"` // Suffix represents a label suffix for formatting axis values
Base string `json:"base"` // Base represents the radix for formatting axis values
Scale string `json:"scale"` // Scale is the axis formatting scale. Supported: "log", "linear"
}
// CellColor represents the encoding of data into visualizations
type CellColor struct {
ID string `json:"id"` // ID is the unique id of the cell color
Type string `json:"type"` // Type is how the color is used. Accepted (min,max,threshold)
Hex string `json:"hex"` // Hex is the hex number of the color
Name string `json:"name"` // Name is the user-facing name of the hex color
Value string `json:"value"` // Value is the data value mapped to this color
}
// DashboardCell holds visual and query information for a cell
type DashboardCell struct {
ID string `json:"i"` // ID is the unique identifier of the cell
X int32 `json:"x"` // X is the horizontal position of the cell
Y int32 `json:"y"` // Y is the vertical position of the cell
W int32 `json:"w"` // W is the width of the cell
H int32 `json:"h"` // H is the height of the cell
Name string `json:"name"`
Queries []DashboardQuery `json:"queries"`
Axes map[string]Axis `json:"axes"`
Type string `json:"type"`
CellColors []CellColor `json:"colors"`
TableOptions TableOptions `json:"tableOptions,omitempty"`
FieldOptions []RenamableField `json:"fieldOptions"`
TimeFormat string `json:"timeFormat"`
DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
// These were added after this code was brought over to influxdb.
Note string `json:"note,omitempty"`
NoteVisibility string `json:"noteVisibility,omitempty"`
}
// RenamableField is a column/row field in a DashboardCell of type Table
type RenamableField struct {
InternalName string `json:"internalName"` // InternalName is the field's name in the underlying data
DisplayName string `json:"displayName"` // DisplayName is the user-chosen name shown in the table
Visible bool `json:"visible"` // Visible toggles whether the field is shown
}
// TableOptions is a type of options for a DashboardCell with type Table
type TableOptions struct {
VerticalTimeAxis bool `json:"verticalTimeAxis"`
SortBy RenamableField `json:"sortBy"`
Wrapping string `json:"wrapping"`
FixFirstColumn bool `json:"fixFirstColumn"`
}
// DecimalPlaces indicates whether decimal places should be enforced, and how many digits it should show.
type DecimalPlaces struct {
IsEnforced bool `json:"isEnforced"`
Digits int32 `json:"digits"`
}
// DashboardsStore is the storage and retrieval of dashboards
type DashboardsStore interface {
// All lists all dashboards from the DashboardsStore
All(context.Context) ([]Dashboard, error)
// Create a new Dashboard in the DashboardsStore
Add(context.Context, Dashboard) (Dashboard, error)
// Delete the Dashboard from the DashboardsStore if `ID` exists.
Delete(context.Context, Dashboard) error
// Get retrieves a dashboard if `ID` exists.
Get(ctx context.Context, id DashboardID) (Dashboard, error)
// Update replaces the dashboard information
Update(context.Context, Dashboard) error
}
// Cell is a rectangle and multiple time series queries to visualize.
type Cell struct {
X int32 `json:"x"` // X is the horizontal position of the cell
Y int32 `json:"y"` // Y is the vertical position of the cell
W int32 `json:"w"` // W is the width of the cell
H int32 `json:"h"` // H is the height of the cell
I string `json:"i"` // I is the unique identifier of the cell
Name string `json:"name"`
Queries []Query `json:"queries"`
Axes map[string]Axis `json:"axes"`
Type string `json:"type"`
CellColors []CellColor `json:"colors"`
}
// Layout is a collection of Cells for visualization
type Layout struct {
ID string `json:"id"`
Application string `json:"app"`
Measurement string `json:"measurement"`
Autoflow bool `json:"autoflow"`
Cells []Cell `json:"cells"`
}
// LayoutsStore stores dashboards and associated Cells
type LayoutsStore interface {
// All returns all layouts in the store
All(context.Context) ([]Layout, error)
// Add creates a new layout in the LayoutsStore
Add(context.Context, Layout) (Layout, error)
// Delete the layout from the store
Delete(context.Context, Layout) error
// Get retrieves Layout if `ID` exists
Get(ctx context.Context, ID string) (Layout, error)
// Update the layout in the store.
Update(context.Context, Layout) error
}
// MappingWildcard is the wildcard value for mappings
const MappingWildcard string = "*"
// A Mapping is the structure that is used to determine a user's
// role within an organization. The high level idea is to grant
// certain roles to certain users without them having to be given
// explicit role within the organization.
//
// One can think of a mapping like so:
// Provider:Scheme:Group -> Organization
// github:oauth2:influxdata -> Happy
// beyondcorp:ldap:influxdata -> TheBillHilliettas
//
// Any of Provider, Scheme, or Group may be provided as a wildcard *
// github:oauth2:* -> MyOrg
// *:*:* -> AllOrg
type Mapping struct {
ID string `json:"id"`
Organization string `json:"organizationId"`
Provider string `json:"provider"`
Scheme string `json:"scheme"`
ProviderOrganization string `json:"providerOrganization"`
}
// MappingsStore is the storage and retrieval of Mappings
type MappingsStore interface {
// Add creates a new Mapping.
// The Created mapping is returned back to the user with the
// ID field populated.
Add(context.Context, *Mapping) (*Mapping, error)
// All lists all Mappings in the MappingsStore
All(context.Context) ([]Mapping, error)
// Delete removes a Mapping from the MappingsStore
Delete(context.Context, *Mapping) error
// Get retrieves a Mapping from the MappingsStore
Get(context.Context, string) (*Mapping, error)
// Update updates a Mapping in the MappingsStore
Update(context.Context, *Mapping) error
}
// Organization is a group of resources under a common name
type Organization struct {
ID string `json:"id"`
Name string `json:"name"`
// DefaultRole is the name of the role that is the default for any users added to the organization
DefaultRole string `json:"defaultRole,omitempty"`
}
// OrganizationQuery represents the attributes that an organization may be retrieved by.
// It is predominantly used in the OrganizationsStore.Get method.
// It is expected that only one of ID or Name will be specified, but will prefer ID over Name if both are specified.
type OrganizationQuery struct {
// If an ID is provided in the query, the lookup time for an organization will be O(1).
ID *string
// If Name is provided, the lookup time will be O(n).
Name *string
}
// OrganizationsStore is the storage and retrieval of Organizations
//
// While not necessary for the app to function correctly, it is
// expected that Implementors of the OrganizationsStore will take
// care to guarantee that the Organization.Name is unique. Allowing
// for duplicate names creates a confusing UX experience for the User.
type OrganizationsStore interface {
// Add creates a new Organization.
// The Created organization is returned back to the user with the
// ID field populated.
Add(context.Context, *Organization) (*Organization, error)
// All lists all Organizations in the OrganizationsStore
All(context.Context) ([]Organization, error)
// Delete removes an Organization from the OrganizationsStore
Delete(context.Context, *Organization) error
// Get retrieves an Organization from the OrganizationsStore
Get(context.Context, OrganizationQuery) (*Organization, error)
// Update updates an Organization in the OrganizationsStore
Update(context.Context, *Organization) error
// CreateDefault creates the default organization
CreateDefault(ctx context.Context) error
// DefaultOrganization returns the DefaultOrganization
DefaultOrganization(ctx context.Context) (*Organization, error)
}
// Config is the global application Config for parameters that can be set via
// API, with different sections, such as Auth
type Config struct {
Auth AuthConfig `json:"auth"`
}
// AuthConfig is the global application config section for auth parameters
type AuthConfig struct {
// SuperAdminNewUsers configuration option that specifies which users will auto become super admin
// (presumably: when true, newly added users become super admins — verify against server usage)
SuperAdminNewUsers bool `json:"superAdminNewUsers"`
}
// ConfigStore is the storage and retrieval of global application Config
type ConfigStore interface {
// Initialize creates the initial configuration
Initialize(context.Context) error
// Get retrieves the whole Config from the ConfigStore
Get(context.Context) (*Config, error)
// Update updates the whole Config in the ConfigStore
Update(context.Context, *Config) error
}
// OrganizationConfig is the organization config for parameters that can
// be set via API, with different sections, such as LogViewer.
type OrganizationConfig struct {
	// OrganizationID identifies the organization this config belongs to.
	OrganizationID string          `json:"organization"`
	LogViewer      LogViewerConfig `json:"logViewer"`
}

// LogViewerConfig is the configuration settings for the Log Viewer UI.
type LogViewerConfig struct {
	Columns []LogViewerColumn `json:"columns"`
}

// LogViewerColumn is a specific column of the Log Viewer UI.
type LogViewerColumn struct {
	Name string `json:"name"`
	// Position is the display order of the column.
	Position  int32            `json:"position"`
	Encodings []ColumnEncoding `json:"encodings"`
}

// ColumnEncoding is the settings for a specific column of the Log Viewer UI.
type ColumnEncoding struct {
	Type  string `json:"type"`
	Value string `json:"value"`
	Name  string `json:"name,omitempty"`
}

// OrganizationConfigStore is the storage and retrieval of organization Configs.
type OrganizationConfigStore interface {
	// FindOrCreate gets an existing OrganizationConfig and creates one if none exists.
	FindOrCreate(ctx context.Context, orgID string) (*OrganizationConfig, error)
	// Put replaces the whole organization config in the OrganizationConfigStore.
	Put(context.Context, *OrganizationConfig) error
}
// BuildInfo is sent to the usage client to track versions and commits.
type BuildInfo struct {
	// Version is the release version string.
	Version string
	// Commit is the VCS commit hash of the build.
	Commit string
}

// BuildStore is the storage and retrieval of Chronograf build information.
type BuildStore interface {
	// Get returns the stored build information.
	Get(context.Context) (BuildInfo, error)
	// Update replaces the stored build information.
	Update(context.Context, BuildInfo) error
}

// Environment is the set of front-end exposed environment variables
// that were set on the server.
type Environment struct {
	TelegrafSystemInterval time.Duration `json:"telegrafSystemInterval"`
}

View File

@ -1,25 +0,0 @@
package id
import (
"strconv"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
)
// tm generates an ID based on the current time.
type tm struct {
	// Now returns the current time; injectable so tests can fix the clock.
	Now func() time.Time
}

// NewTime builds a chronograf.ID generator based on the current time.
func NewTime() chronograf.ID {
	return &tm{
		Now: time.Now,
	}
}

// Generate returns the current Unix time in seconds as a decimal string.
func (i *tm) Generate() (string, error) {
	// Format the int64 directly: the previous int(...) conversion could
	// truncate the seconds value on 32-bit platforms.
	return strconv.FormatInt(i.Now().Unix(), 10), nil
}

View File

@ -1,20 +0,0 @@
package id
import (
"github.com/influxdata/influxdb/v2/chronograf"
uuid "github.com/satori/go.uuid"
)
var _ chronograf.ID = &UUID{}

// UUID generates a V4 uuid.
type UUID struct{}

// Generate creates a UUID v4 string.
func (i *UUID) Generate() (string, error) {
	// Name the local "u" so it does not shadow the imported uuid package.
	u, err := uuid.NewV4()
	if err != nil {
		return "", err
	}
	return u.String(), nil
}

View File

@ -1,270 +0,0 @@
package influx
import (
"bytes"
"context"
"encoding/json"
"fmt"
"sort"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/id"
)
// InfluxQL statements and storage locations used by the annotation store.
const (
	// AllAnnotations returns all annotations from the chronograf database.
	// The two %d verbs are the start/stop bounds in nanoseconds.
	AllAnnotations = `SELECT "start_time", "modified_time_ns", "text", "type", "id" FROM "annotations" WHERE "deleted"=false AND time >= %dns and "start_time" <= %d ORDER BY time DESC`
	// GetAnnotationID returns all annotations from the chronograf database where id is %s.
	GetAnnotationID = `SELECT "start_time", "modified_time_ns", "text", "type", "id" FROM "annotations" WHERE "id"='%s' AND "deleted"=false ORDER BY time DESC`
	// AnnotationsDB is chronograf. Perhaps later we allow this to be changed.
	AnnotationsDB = "chronograf"
	// DefaultRP is autogen. Perhaps later we allow this to be changed.
	DefaultRP = "autogen"
	// DefaultMeasurement is annotations.
	DefaultMeasurement = "annotations"
)
var _ chronograf.AnnotationStore = &AnnotationStore{}

// AnnotationStore stores annotations within InfluxDB.
type AnnotationStore struct {
	client chronograf.TimeSeries // connection used to read and write annotation points
	id     chronograf.ID         // generator for new annotation IDs
	now    Now                   // clock; injectable for tests
}
// NewAnnotationStore constructs an annotation store backed by the given
// time-series client, using UUIDv4 IDs and the real clock.
func NewAnnotationStore(client chronograf.TimeSeries) *AnnotationStore {
	store := AnnotationStore{
		client: client,
		id:     &id.UUID{},
		now:    time.Now,
	}
	return &store
}
// All lists every annotation whose interval overlaps [start, stop].
func (a *AnnotationStore) All(ctx context.Context, start, stop time.Time) ([]chronograf.Annotation, error) {
	q := fmt.Sprintf(AllAnnotations, start.UnixNano(), stop.UnixNano())
	return a.queryAnnotations(ctx, q)
}
// Get retrieves the annotation with the given id, or
// chronograf.ErrAnnotationNotFound if no such annotation exists.
func (a *AnnotationStore) Get(ctx context.Context, id string) (*chronograf.Annotation, error) {
	matches, err := a.queryAnnotations(ctx, fmt.Sprintf(GetAnnotationID, id))
	switch {
	case err != nil:
		return nil, err
	case len(matches) == 0:
		return nil, chronograf.ErrAnnotationNotFound
	}
	return &matches[0], nil
}
// Add creates a new annotation in the store, assigning it a fresh ID.
// The annotation (with ID populated) is returned alongside any write error.
func (a *AnnotationStore) Add(ctx context.Context, anno *chronograf.Annotation) (*chronograf.Annotation, error) {
	newID, err := a.id.Generate()
	if err != nil {
		return nil, err
	}
	anno.ID = newID
	points := []chronograf.Point{toPoint(anno, a.now())}
	return anno, a.client.Write(ctx, points)
}
// Delete removes the annotation with the given id by writing a tombstone
// ("deleted"=true) point over it.
func (a *AnnotationStore) Delete(ctx context.Context, id string) error {
	anno, err := a.Get(ctx, id)
	if err != nil {
		return err
	}
	tombstone := []chronograf.Point{toDeletedPoint(anno, a.now())}
	return a.client.Write(ctx, tombstone)
}
// Update replaces annotation; if the annotation's time is different, it
// also removes the previous annotation.
func (a *AnnotationStore) Update(ctx context.Context, anno *chronograf.Annotation) error {
	cur, err := a.Get(ctx, anno.ID)
	if err != nil {
		return err
	}
	// Write the new version first so the annotation is never missing; only
	// afterwards tombstone the old point if its timestamp changed. Keep this
	// order: updates are multi-step and a failure between steps must leave a
	// live record behind.
	if err := a.client.Write(ctx, []chronograf.Point{toPoint(anno, a.now())}); err != nil {
		return err
	}
	// If the updated annotation has a different time, then, we must
	// delete the previous annotation
	if !cur.EndTime.Equal(anno.EndTime) {
		return a.client.Write(ctx, []chronograf.Point{
			toDeletedPoint(cur, a.now()),
		})
	}
	return nil
}
// queryAnnotations runs an InfluxQL query against the chronograf database
// and decodes the response into annotations.
func (a *AnnotationStore) queryAnnotations(ctx context.Context, query string) ([]chronograf.Annotation, error) {
	resp, err := a.client.Query(ctx, chronograf.Query{
		Command: query,
		DB:      AnnotationsDB,
		Epoch:   "ns",
	})
	if err != nil {
		return nil, err
	}
	raw, err := resp.MarshalJSON()
	if err != nil {
		return nil, err
	}
	// Decode with UseNumber so integer timestamps survive without being
	// forced through float64.
	var parsed influxResults
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&parsed); err != nil {
		return nil, err
	}
	return parsed.Annotations()
}
// toPoint converts a live annotation into the point written to InfluxDB.
// The point's timestamp is the annotation's end time.
func toPoint(anno *chronograf.Annotation, now time.Time) chronograf.Point {
	fields := map[string]interface{}{
		"deleted":          false,
		"start_time":       anno.StartTime.UnixNano(),
		"modified_time_ns": int64(now.UnixNano()),
		"text":             anno.Text,
		"type":             anno.Type,
	}
	return chronograf.Point{
		Database:        AnnotationsDB,
		RetentionPolicy: DefaultRP,
		Measurement:     DefaultMeasurement,
		Time:            anno.EndTime.UnixNano(),
		Tags:            map[string]string{"id": anno.ID},
		Fields:          fields,
	}
}
// toDeletedPoint converts an annotation into its tombstone point:
// "deleted" is true and the descriptive fields are zeroed.
func toDeletedPoint(anno *chronograf.Annotation, now time.Time) chronograf.Point {
	fields := map[string]interface{}{
		"deleted":          true,
		"start_time":       int64(0),
		"modified_time_ns": int64(now.UnixNano()),
		"text":             "",
		"type":             "",
	}
	return chronograf.Point{
		Database:        AnnotationsDB,
		RetentionPolicy: DefaultRP,
		Measurement:     DefaultMeasurement,
		Time:            anno.EndTime.UnixNano(),
		Tags:            map[string]string{"id": anno.ID},
		Fields:          fields,
	}
}
type value []interface{}
func (v value) Int64(idx int) (int64, error) {
if idx >= len(v) {
return 0, fmt.Errorf("index %d does not exist in values", idx)
}
n, ok := v[idx].(json.Number)
if !ok {
return 0, fmt.Errorf("value at index %d is not int64, but, %T", idx, v[idx])
}
return n.Int64()
}
func (v value) Time(idx int) (time.Time, error) {
tm, err := v.Int64(idx)
if err != nil {
return time.Time{}, err
}
return time.Unix(0, tm), nil
}
func (v value) String(idx int) (string, error) {
if idx >= len(v) {
return "", fmt.Errorf("index %d does not exist in values", idx)
}
str, ok := v[idx].(string)
if !ok {
return "", fmt.Errorf("value at index %d is not string, but, %T", idx, v[idx])
}
return str, nil
}
// influxResults models the portion of an InfluxQL JSON response needed to
// extract annotation rows (the per-statement series and their values).
type influxResults []struct {
	Series []struct {
		Values []value `json:"values"`
	} `json:"series"`
}
// annotationResult is an intermediate struct to track the latest modified
// time of an annotation.
type annotationResult struct {
	chronograf.Annotation
	// modTime is bookkeeping to handle the case when an update fails; the latest
	// modTime will be the record returned
	modTime int64
}
// Annotations converts an AllAnnotations query result into annotations,
// deduplicating rows that share an ID by keeping the most recently modified
// one, and returning the result sorted by start time (then ID).
func (r *influxResults) Annotations() (res []chronograf.Annotation, err error) {
	annos := map[string]annotationResult{}
	for _, u := range *r {
		for _, s := range u.Series {
			for _, v := range s.Values {
				// Column order is fixed by the SELECT statements above:
				// time, start_time, modified_time_ns, text, type, id.
				anno := annotationResult{}
				if anno.EndTime, err = v.Time(0); err != nil {
					return
				}
				if anno.StartTime, err = v.Time(1); err != nil {
					return
				}
				if anno.modTime, err = v.Int64(2); err != nil {
					return
				}
				if anno.Text, err = v.String(3); err != nil {
					return
				}
				if anno.Type, err = v.String(4); err != nil {
					return
				}
				if anno.ID, err = v.String(5); err != nil {
					return
				}
				// If there are two annotations with the same id, take
				// the annotation with the latest modification time.
				// This is to prevent issues when an update or delete fails.
				// Updates and deletes are multiple step queries.
				prev, ok := annos[anno.ID]
				if !ok || anno.modTime > prev.modTime {
					annos[anno.ID] = anno
				}
			}
		}
	}
	res = []chronograf.Annotation{}
	for _, a := range annos {
		res = append(res, a.Annotation)
	}
	// Sort by start time, breaking ties by ID. The previous comparator
	// (Before(...) || ID <) was not a strict weak ordering — for two
	// elements with different times and conflicting IDs both less(i,j) and
	// less(j,i) could be true, which makes sort.Slice's output unspecified.
	sort.Slice(res, func(i, j int) bool {
		if !res[i].StartTime.Equal(res[j].StartTime) {
			return res[i].StartTime.Before(res[j].StartTime)
		}
		return res[i].ID < res[j].ID
	})
	return res, err
}

View File

@ -1,665 +0,0 @@
package influx
import (
"context"
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// Test_toPoint verifies the annotation→point conversion, with and without
// explicit start/end times (zero times must produce zero-time fields).
func Test_toPoint(t *testing.T) {
	tests := []struct {
		name string
		anno *chronograf.Annotation
		now  time.Time
		want chronograf.Point
	}{
		0: {
			name: "convert annotation to point w/o start and end times",
			anno: &chronograf.Annotation{
				ID:   "1",
				Text: "mytext",
				Type: "mytype",
			},
			now: time.Unix(0, 0),
			want: chronograf.Point{
				Database:        AnnotationsDB,
				RetentionPolicy: DefaultRP,
				Measurement:     DefaultMeasurement,
				Time:            time.Time{}.UnixNano(),
				Tags: map[string]string{
					"id": "1",
				},
				Fields: map[string]interface{}{
					"deleted":          false,
					"start_time":       time.Time{}.UnixNano(),
					"modified_time_ns": int64(time.Unix(0, 0).UnixNano()),
					"text":             "mytext",
					"type":             "mytype",
				},
			},
		},
		1: {
			name: "convert annotation to point with start/end time",
			anno: &chronograf.Annotation{
				ID:        "1",
				Text:      "mytext",
				Type:      "mytype",
				StartTime: time.Unix(100, 0),
				EndTime:   time.Unix(200, 0),
			},
			now: time.Unix(0, 0),
			want: chronograf.Point{
				Database:        AnnotationsDB,
				RetentionPolicy: DefaultRP,
				Measurement:     DefaultMeasurement,
				// The point's timestamp is the annotation's end time.
				Time: time.Unix(200, 0).UnixNano(),
				Tags: map[string]string{
					"id": "1",
				},
				Fields: map[string]interface{}{
					"deleted":          false,
					"start_time":       time.Unix(100, 0).UnixNano(),
					"modified_time_ns": int64(time.Unix(0, 0).UnixNano()),
					"text":             "mytext",
					"type":             "mytype",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := toPoint(tt.anno, tt.now); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("toPoint() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_toDeletedPoint verifies that a tombstone point has deleted=true and
// zeroed descriptive fields.
func Test_toDeletedPoint(t *testing.T) {
	tests := []struct {
		name string
		anno *chronograf.Annotation
		now  time.Time
		want chronograf.Point
	}{
		0: {
			name: "convert annotation to point w/o start and end times",
			anno: &chronograf.Annotation{
				ID:      "1",
				EndTime: time.Unix(0, 0),
			},
			now: time.Unix(0, 0),
			want: chronograf.Point{
				Database:        AnnotationsDB,
				RetentionPolicy: DefaultRP,
				Measurement:     DefaultMeasurement,
				Time:            0,
				Tags: map[string]string{
					"id": "1",
				},
				Fields: map[string]interface{}{
					"deleted":          true,
					"start_time":       int64(0),
					"modified_time_ns": int64(0),
					"text":             "",
					"type":             "",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := toDeletedPoint(tt.anno, tt.now); !cmp.Equal(got, tt.want) {
				t.Errorf("toDeletedPoint() = %s", cmp.Diff(got, tt.want))
			}
		})
	}
}
// Test_value_Int64 covers out-of-range index, json.Number conversion, and
// the non-number error path.
func Test_value_Int64(t *testing.T) {
	tests := []struct {
		name    string
		v       value
		idx     int
		want    int64
		wantErr bool
	}{
		{
			name:    "index out of range returns error",
			idx:     1,
			wantErr: true,
		},
		{
			name: "converts a string to int64",
			v: value{
				json.Number("1"),
			},
			idx:  0,
			want: int64(1),
		},
		{
			name: "when not a json.Number, return error",
			v: value{
				"howdy",
			},
			idx:     0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.v.Int64(tt.idx)
			if (err != nil) != tt.wantErr {
				t.Errorf("value.Int64() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("value.Int64() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_value_Time covers out-of-range index, nanosecond conversion, and the
// non-number error path.
func Test_value_Time(t *testing.T) {
	tests := []struct {
		name    string
		v       value
		idx     int
		want    time.Time
		wantErr bool
	}{
		{
			name:    "index out of range returns error",
			idx:     1,
			wantErr: true,
		},
		{
			name: "converts a string to int64",
			v: value{
				json.Number("1"),
			},
			idx: 0,
			// Numbers are interpreted as nanoseconds since the epoch.
			want: time.Unix(0, 1),
		},
		{
			name: "when not a json.Number, return error",
			v: value{
				"howdy",
			},
			idx:     0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.v.Time(tt.idx)
			if (err != nil) != tt.wantErr {
				t.Errorf("value.Time() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("value.Time() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_value_String covers out-of-range index, the string happy path, and
// the non-string error path.
func Test_value_String(t *testing.T) {
	tests := []struct {
		name    string
		v       value
		idx     int
		want    string
		wantErr bool
	}{
		{
			name:    "index out of range returns error",
			idx:     1,
			wantErr: true,
		},
		{
			name: "converts a string",
			v: value{
				"howdy",
			},
			idx:  0,
			want: "howdy",
		},
		{
			name: "when not a string, return error",
			v: value{
				0,
			},
			idx:     0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.v.String(tt.idx)
			if (err != nil) != tt.wantErr {
				t.Errorf("value.String() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("value.String() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestAnnotationStore_queryAnnotations drives queryAnnotations through a
// mocked TimeSeries client: query/marshal/decode error paths, column-order
// mismatches, multi-row responses, id-deduplication, and empty responses.
func TestAnnotationStore_queryAnnotations(t *testing.T) {
	type args struct {
		ctx   context.Context
		query string
	}
	tests := []struct {
		name    string
		client  chronograf.TimeSeries
		args    args
		want    []chronograf.Annotation
		wantErr bool
	}{
		{
			name: "query error returns an error",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return nil, fmt.Errorf("error")
				},
			},
			wantErr: true,
		},
		{
			name: "response marshal error returns an error",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return mocks.NewResponse("", fmt.Errorf("")), nil
				},
			},
			wantErr: true,
		},
		{
			name: "Bad JSON returns an error",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return mocks.NewResponse(`{}`, nil), nil
				},
			},
			wantErr: true,
		},
		{
			// Columns are in the wrong order here, so decoding the typed
			// values must fail.
			name: "Incorrect fields returns error",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return mocks.NewResponse(`[{
"series": [
{
"name": "annotations",
"columns": [
"time",
"deleted",
"id",
"modified_time_ns",
"start_time",
"text",
"type"
],
"values": [
[
1516920117000000000,
true,
"4ba9f836-20e8-4b8e-af51-e1363edd7b6d",
1517425994487495051,
0,
"",
""
]
]
}
]
}
]}]`, nil), nil
				},
			},
			wantErr: true,
		},
		{
			name: "two annotation response",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return mocks.NewResponse(`[
{
"series": [
{
"name": "annotations",
"columns": [
"time",
"start_time",
"modified_time_ns",
"text",
"type",
"id"
],
"values": [
[
1516920177345000000,
0,
1516989242129417403,
"mytext",
"mytype",
"ecf3a75d-f1c0-40e8-9790-902701467e92"
],
[
1516920177345000000,
0,
1517425914433539296,
"mytext2",
"mytype2",
"ea0aa94b-969a-4cd5-912a-5db61d502268"
]
]
}
]
}
]`, nil), nil
				},
			},
			want: []chronograf.Annotation{
				{
					EndTime:   time.Unix(0, 1516920177345000000),
					StartTime: time.Unix(0, 0),
					Text:      "mytext2",
					Type:      "mytype2",
					ID:        "ea0aa94b-969a-4cd5-912a-5db61d502268",
				},
				{
					EndTime:   time.Unix(0, 1516920177345000000),
					StartTime: time.Unix(0, 0),
					Text:      "mytext",
					Type:      "mytype",
					ID:        "ecf3a75d-f1c0-40e8-9790-902701467e92",
				},
			},
		},
		{
			// Two rows with the same id: only the one with the larger
			// modified_time_ns must survive deduplication.
			name: "same id returns one",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return mocks.NewResponse(`[
{
"series": [
{
"name": "annotations",
"columns": [
"time",
"start_time",
"modified_time_ns",
"text",
"type",
"id"
],
"values": [
[
1516920177345000000,
0,
1516989242129417403,
"mytext",
"mytype",
"ea0aa94b-969a-4cd5-912a-5db61d502268"
],
[
1516920177345000000,
0,
1517425914433539296,
"mytext2",
"mytype2",
"ea0aa94b-969a-4cd5-912a-5db61d502268"
]
]
}
]
}
]`, nil), nil
				},
			},
			want: []chronograf.Annotation{
				{
					EndTime:   time.Unix(0, 1516920177345000000),
					StartTime: time.Unix(0, 0),
					Text:      "mytext2",
					Type:      "mytype2",
					ID:        "ea0aa94b-969a-4cd5-912a-5db61d502268",
				},
			},
		},
		{
			name: "no responses returns empty array",
			client: &mocks.TimeSeries{
				QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
					return mocks.NewResponse(`[ { } ]`, nil), nil
				},
			},
			want: []chronograf.Annotation{},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := &AnnotationStore{
				client: tt.client,
			}
			got, err := a.queryAnnotations(tt.args.ctx, tt.args.query)
			if (err != nil) != tt.wantErr {
				t.Errorf("AnnotationStore.queryAnnotations() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("AnnotationStore.queryAnnotations() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestAnnotationStore_Update exercises Update through a mocked client: a
// missing annotation, a write failure, an update that also tombstones the
// old point (end time changed), and an update that does not (end time same).
func TestAnnotationStore_Update(t *testing.T) {
	type fields struct {
		client chronograf.TimeSeries
		now    Now
	}
	type args struct {
		ctx  context.Context
		anno *chronograf.Annotation
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			// Get finds nothing, so Update must fail before writing.
			name: "no responses returns error",
			fields: fields{
				client: &mocks.TimeSeries{
					QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
						return mocks.NewResponse(`[ { } ]`, nil), nil
					},
					WriteF: func(context.Context, []chronograf.Point) error {
						return nil
					},
				},
			},
			args: args{
				ctx: context.Background(),
				anno: &chronograf.Annotation{
					ID: "1",
				},
			},
			wantErr: true,
		},
		{
			name: "error writing returns error",
			fields: fields{
				now: func() time.Time { return time.Time{} },
				client: &mocks.TimeSeries{
					QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
						return mocks.NewResponse(`[
{
"series": [
{
"name": "annotations",
"columns": [
"time",
"start_time",
"modified_time_ns",
"text",
"type",
"id"
],
"values": [
[
1516920177345000000,
0,
1516989242129417403,
"mytext",
"mytype",
"ecf3a75d-f1c0-40e8-9790-902701467e92"
],
[
1516920177345000000,
0,
1517425914433539296,
"mytext2",
"mytype2",
"ea0aa94b-969a-4cd5-912a-5db61d502268"
]
]
}
]
}
]`, nil), nil
					},
					WriteF: func(context.Context, []chronograf.Point) error {
						return fmt.Errorf("error")
					},
				},
			},
			args: args{
				ctx: context.Background(),
				anno: &chronograf.Annotation{
					ID: "1",
				},
			},
			wantErr: true,
		},
		{
			// The incoming annotation has a zero end time, different from the
			// stored one, so the old point is tombstoned after the write.
			name: "Update with delete",
			fields: fields{
				now: func() time.Time { return time.Time{} },
				client: &mocks.TimeSeries{
					QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
						return mocks.NewResponse(`[
{
"series": [
{
"name": "annotations",
"columns": [
"time",
"start_time",
"modified_time_ns",
"text",
"type",
"id"
],
"values": [
[
1516920177345000000,
0,
1516989242129417403,
"mytext",
"mytype",
"ecf3a75d-f1c0-40e8-9790-902701467e92"
]
]
}
]
}
]`, nil), nil
					},
					WriteF: func(context.Context, []chronograf.Point) error {
						return nil
					},
				},
			},
			args: args{
				ctx: context.Background(),
				anno: &chronograf.Annotation{
					ID: "1",
				},
			},
		},
		{
			// End time matches the stored point, so no tombstone is written.
			name: "Update with delete no delete",
			fields: fields{
				now: func() time.Time { return time.Time{} },
				client: &mocks.TimeSeries{
					QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
						return mocks.NewResponse(`[
{
"series": [
{
"name": "annotations",
"columns": [
"time",
"start_time",
"modified_time_ns",
"text",
"type",
"id"
],
"values": [
[
1516920177345000000,
0,
1516989242129417403,
"mytext",
"mytype",
"ecf3a75d-f1c0-40e8-9790-902701467e92"
]
]
}
]
}
]`, nil), nil
					},
					WriteF: func(context.Context, []chronograf.Point) error {
						return nil
					},
				},
			},
			args: args{
				ctx: context.Background(),
				anno: &chronograf.Annotation{
					ID:      "ecf3a75d-f1c0-40e8-9790-902701467e92",
					EndTime: time.Unix(0, 1516920177345000000),
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := &AnnotationStore{
				client: tt.fields.client,
				now:    tt.fields.now,
			}
			if err := a.Update(tt.args.ctx, tt.args.anno); (err != nil) != tt.wantErr {
				t.Errorf("AnnotationStore.Update() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@ -1,94 +0,0 @@
package influx
import (
"fmt"
"net/http"
"time"
jwt "github.com/golang-jwt/jwt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Authorizer adds an optional authorization header to a request.
type Authorizer interface {
	// Set may manipulate the request by adding the Authorization header
	Set(req *http.Request) error
}
// NoAuthorization does not add any authorization headers.
type NoAuthorization struct{}

// Set is a no-op; the request is left unmodified.
func (n *NoAuthorization) Set(req *http.Request) error { return nil }
// DefaultAuthorization picks an Authorizer for the source: a shared-secret
// JWT builder when a shared secret is configured, basic auth when a password
// is configured, and a no-op otherwise. JWT takes precedence.
func DefaultAuthorization(src *chronograf.Source) Authorizer {
	switch {
	case src.Username != "" && src.SharedSecret != "":
		return &BearerJWT{
			Username:     src.Username,
			SharedSecret: src.SharedSecret,
		}
	case src.Username != "" && src.Password != "":
		return &BasicAuth{
			Username: src.Username,
			Password: src.Password,
		}
	default:
		return &NoAuthorization{}
	}
}
// BasicAuth adds Authorization: Basic to the request header
type BasicAuth struct {
Username string
Password string
}
// Set adds the basic auth headers to the request
func (b *BasicAuth) Set(r *http.Request) error {
r.SetBasicAuth(b.Username, b.Password)
return nil
}
// BearerJWT is the default Bearer token builder for InfluxDB, signing a
// short-lived JWT with a secret shared with the server.
type BearerJWT struct {
	Username     string
	SharedSecret string
	// Now is the clock used for the token expiry; Token defaults it to
	// time.Now when nil.
	Now Now
}
// Set adds an Authorization: Bearer header to the request when both the
// shared secret and username are configured; otherwise the request is left
// unmodified.
func (b *BearerJWT) Set(r *http.Request) error {
	if b.SharedSecret != "" && b.Username != "" {
		token, err := b.Token(b.Username)
		if err != nil {
			// Wrap the cause instead of discarding it so callers can see
			// why signing failed.
			return fmt.Errorf("unable to create token: %w", err)
		}
		r.Header.Set("Authorization", "Bearer "+token)
	}
	return nil
}
// Token returns the expected InfluxDB JWT signed with the sharedSecret.
func (b *BearerJWT) Token(username string) (string, error) {
	if b.Now == nil {
		// Lazily default the clock. NOTE(review): this mutates the receiver,
		// so concurrent first calls would race — confirm callers are serial.
		b.Now = time.Now
	}
	return JWT(username, b.SharedSecret, b.Now)
}
// JWT returns a token string accepted by InfluxDB, signed with the shared
// secret using HS512 and expiring one minute after now().
func JWT(username, sharedSecret string, now Now) (string, error) {
	token := &jwt.Token{
		Header: map[string]interface{}{
			"typ": "JWT",
			"alg": jwt.SigningMethodHS512.Alg(),
		},
		Claims: jwt.MapClaims{
			"username": username,
			"exp":      now().Add(time.Minute).Unix(),
		},
		Method: jwt.SigningMethodHS512,
	}
	return token.SignedString([]byte(sharedSecret))
}

View File

@ -1,44 +0,0 @@
package influx
import (
"testing"
"time"
)
// TestJWT pins the exact signed token produced for a fixed clock, username,
// and shared secret (HS512, exp = now + 1 minute).
func TestJWT(t *testing.T) {
	type args struct {
		username     string
		sharedSecret string
		now          Now
	}
	tests := []struct {
		name    string
		args    args
		want    string
		wantErr bool
	}{
		{
			name: "",
			args: args{
				username:     "AzureDiamond",
				sharedSecret: "hunter2",
				now: func() time.Time {
					return time.Unix(0, 0)
				},
			},
			want: "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjYwLCJ1c2VybmFtZSI6IkF6dXJlRGlhbW9uZCJ9.kUWGwcpCPwV7MEk7luO1rt8036LyvG4bRL_CfseQGmz4b0S34gATx30g4xvqVAV6bwwYE0YU3P8FjG8ij4kc5g",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := JWT(tt.args.username, tt.args.sharedSecret, tt.args.now)
			if (err != nil) != tt.wantErr {
				t.Errorf("JWT() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("JWT() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -1,269 +0,0 @@
package influx
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/influxdata/influxdb/v2/chronograf"
	"github.com/influxdata/influxdb/v2/kit/tracing"
)
// AllDB returns all databases from within Influx.
func (c *Client) AllDB(ctx context.Context) ([]chronograf.Database, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	return c.showDatabases(ctx)
}
// CreateDB creates a database within Influx and returns a Database carrying
// the requested name.
func (c *Client) CreateDB(ctx context.Context, db *chronograf.Database) (*chronograf.Database, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	// NOTE(review): db.Name is interpolated into the InfluxQL statement
	// without escaping; a name containing '"' would break the statement —
	// confirm upstream validation.
	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`CREATE DATABASE "%s"`, db.Name),
	})
	if err != nil {
		return nil, err
	}
	res := &chronograf.Database{Name: db.Name}
	return res, nil
}
// DropDB drops the named database within Influx.
func (c *Client) DropDB(ctx context.Context, db string) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`DROP DATABASE "%s"`, db),
		DB:      db,
	})
	return err
}
// AllRP returns all the retention policies for a specific database.
func (c *Client) AllRP(ctx context.Context, db string) ([]chronograf.RetentionPolicy, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	return c.showRetentionPolicies(ctx, db)
}
// getRP looks up a single retention policy by name within a database,
// returning an error if no policy with that name exists.
func (c *Client) getRP(ctx context.Context, db, rp string) (chronograf.RetentionPolicy, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	policies, err := c.AllRP(ctx, db)
	if err != nil {
		return chronograf.RetentionPolicy{}, err
	}
	for i := range policies {
		if policies[i].Name == rp {
			return policies[i], nil
		}
	}
	return chronograf.RetentionPolicy{}, fmt.Errorf("unknown retention policy")
}
// CreateRP creates a retention policy on db and returns the policy as the
// server reports it afterwards.
func (c *Client) CreateRP(ctx context.Context, db string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	command := fmt.Sprintf(`CREATE RETENTION POLICY "%s" ON "%s" DURATION %s REPLICATION %d`, rp.Name, db, rp.Duration, rp.Replication)
	if len(rp.ShardDuration) != 0 {
		command += " SHARD DURATION " + rp.ShardDuration
	}
	if rp.Default {
		command += " DEFAULT"
	}

	if _, err := c.Query(ctx, chronograf.Query{
		Command: command,
		DB:      db,
	}); err != nil {
		return nil, err
	}

	// Read the policy back so the caller gets server-reported values.
	created, err := c.getRP(ctx, db, rp.Name)
	if err != nil {
		return nil, err
	}
	return &created, nil
}
// UpdateRP updates a specific retention policy for a specific database and
// returns the policy as the server reports it afterwards. Only the fields
// set in upd (Duration, Replication, ShardDuration, Default) are altered.
func (c *Client) UpdateRP(ctx context.Context, db string, rp string, upd *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf(`ALTER RETENTION POLICY "%s" ON "%s"`, rp, db))
	if len(upd.Duration) > 0 {
		buffer.WriteString(" DURATION " + upd.Duration)
	}
	if upd.Replication > 0 {
		buffer.WriteString(" REPLICATION " + fmt.Sprint(upd.Replication))
	}
	if len(upd.ShardDuration) > 0 {
		buffer.WriteString(" SHARD DURATION " + upd.ShardDuration)
	}
	if upd.Default {
		buffer.WriteString(" DEFAULT")
	}
	queryRes, err := c.Query(ctx, chronograf.Query{
		Command: buffer.String(),
		DB:      db,
		RP:      rp,
	})
	if err != nil {
		return nil, err
	}

	// The ALTER RETENTION POLICY statement puts the error within the results
	// itself, so we have to crack open the results to see what happened.
	octets, err := queryRes.MarshalJSON()
	if err != nil {
		return nil, err
	}

	results := make([]struct{ Error string }, 0)
	if err := json.Unmarshal(octets, &results); err != nil {
		return nil, err
	}

	// At last, we can check if there are any error strings.
	for _, r := range results {
		if r.Error != "" {
			// errors.New, not fmt.Errorf(r.Error): the server message is not
			// a format string, and any '%' in it would be mangled.
			return nil, errors.New(r.Error)
		}
	}

	res, err := c.getRP(ctx, db, upd.Name)
	if err != nil {
		return nil, err
	}
	return &res, nil
}
// DropRP removes the named retention policy from a specific database.
func (c *Client) DropRP(ctx context.Context, db string, rp string) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`DROP RETENTION POLICY "%s" ON "%s"`, rp, db),
		DB:      db,
		RP:      rp,
	})
	return err
}
// GetMeasurements returns measurements in a specified database, paginated by
// optional limit and offset. If no limit or offset is provided, it defaults to
// a limit of 100 measurements with no offset.
func (c *Client) GetMeasurements(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	return c.showMeasurements(ctx, db, limit, offset)
}
// showDatabases runs SHOW DATABASES and decodes the result rows.
func (c *Client) showDatabases(ctx context.Context) ([]chronograf.Database, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	resp, err := c.Query(ctx, chronograf.Query{
		Command: `SHOW DATABASES`,
	})
	if err != nil {
		return nil, err
	}
	raw, err := resp.MarshalJSON()
	if err != nil {
		return nil, err
	}

	var parsed showResults
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, err
	}
	return parsed.Databases(), nil
}
// showRetentionPolicies runs SHOW RETENTION POLICIES for db and decodes the
// result rows.
func (c *Client) showRetentionPolicies(ctx context.Context, db string) ([]chronograf.RetentionPolicy, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	resp, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`SHOW RETENTION POLICIES ON "%s"`, db),
		DB:      db,
	})
	if err != nil {
		return nil, err
	}
	raw, err := resp.MarshalJSON()
	if err != nil {
		return nil, err
	}

	var parsed showResults
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, err
	}
	return parsed.RetentionPolicies(), nil
}
// showMeasurements runs SHOW MEASUREMENTS for db, appending LIMIT/OFFSET
// clauses only when the corresponding argument is positive, and decodes the
// result rows.
func (c *Client) showMeasurements(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	command := fmt.Sprintf(`SHOW MEASUREMENTS ON "%s"`, db)
	if limit > 0 {
		command += fmt.Sprintf(" LIMIT %d", limit)
	}
	if offset > 0 {
		command += fmt.Sprintf(" OFFSET %d", offset)
	}

	resp, err := c.Query(ctx, chronograf.Query{
		Command: command,
		DB:      db,
	})
	if err != nil {
		return nil, err
	}
	raw, err := resp.MarshalJSON()
	if err != nil {
		return nil, err
	}

	var parsed showResults
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, err
	}
	return parsed.Measurements(), nil
}

View File

@ -1,388 +0,0 @@
package influx
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/kit/tracing"
)
var _ chronograf.TimeSeries = &Client{}
var _ chronograf.TSDBStatus = &Client{}
var _ chronograf.Databases = &Client{}

// Shared transports for all clients to prevent leaking connections.
var (
	// skipVerifyTransport disables TLS certificate verification; selected
	// only when a Client has InsecureSkipVerify set.
	skipVerifyTransport = &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	defaultTransport = &http.Transport{}
)
// Client is a device for retrieving time series data from an InfluxDB instance.
type Client struct {
	URL        *url.URL   // base URL of the InfluxDB instance
	Authorizer Authorizer // optional; adds the Authorization header to requests
	// InsecureSkipVerify selects a transport that skips TLS certificate
	// verification.
	InsecureSkipVerify bool
	Logger             chronograf.Logger
}
// Response is a partial JSON decoded InfluxQL response used
// to check for some errors.
type Response struct {
	Results json.RawMessage
	Err     string `json:"error,omitempty"`
}

// MarshalJSON returns the raw results bytes from the response.
func (r Response) MarshalJSON() ([]byte, error) {
	return r.Results, nil
}
// query POSTs the InfluxQL command in q to the instance at u and decodes
// the partial response. It returns an error for any non-200 status or for
// a body that fails to decode. Note it mutates u.Path (preserved from the
// original behavior, so the shared c.URL points at /query afterwards).
func (c *Client) query(ctx context.Context, u *url.URL, q chronograf.Query) (chronograf.Response, error) {
	span, _ := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	u.Path = "query"
	req, err := http.NewRequest("POST", u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")

	command := q.Command
	logs := c.Logger.
		WithField("component", "proxy").
		WithField("host", req.Host).
		WithField("command", command).
		WithField("db", q.DB).
		WithField("rp", q.RP)
	logs.Debug("query")

	params := req.URL.Query()
	params.Set("q", command)
	params.Set("db", q.DB)
	params.Set("rp", q.RP)
	// Default to millisecond precision unless the query asks otherwise.
	params.Set("epoch", "ms")
	if q.Epoch != "" {
		params.Set("epoch", q.Epoch)
	}
	req.URL.RawQuery = params.Encode()

	tracing.InjectToHTTPRequest(span, req)

	if c.Authorizer != nil {
		if err := c.Authorizer.Set(req); err != nil {
			logs.Error("Error setting authorization header ", err)
			return nil, err
		}
	}

	hc := &http.Client{}
	if c.InsecureSkipVerify {
		hc.Transport = skipVerifyTransport
	} else {
		hc.Transport = defaultTransport
	}
	resp, err := hc.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var response Response
	dec := json.NewDecoder(resp.Body)
	decErr := dec.Decode(&response)

	// Any non-200 is an error; include whatever error string the body carried.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("received status code %d from server: err: %s", resp.StatusCode, response.Err)
	}

	// NOTE(review): the original code had two further branches both guarded
	// by resp.StatusCode != http.StatusOK; they were unreachable after the
	// early return above and have been removed without behavior change.

	// A decode failure on a 200 response (including EOF on an empty body)
	// is reported to the caller.
	if decErr != nil {
		logs.WithField("influx_status", resp.StatusCode).
			Error("Error parsing results from influxdb: err:", decErr)
		return nil, decErr
	}
	return &response, nil
}
// result pairs a query response with its error for delivery over a channel.
type result struct {
	Response chronograf.Response
	Err      error
}
// Query issues a request to a configured InfluxDB instance for time series
// information specified by query. Queries must be "fully-qualified," and
// include both the database and retention policy. In-flight requests can be
// cancelled using the provided context.
func (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()
	// Buffered so the worker goroutine can always deliver its result and
	// exit even when we return early on ctx.Done(); an unbuffered channel
	// here leaked the goroutine (blocked forever on send) on cancellation.
	resps := make(chan result, 1)
	go func() {
		resp, err := c.query(ctx, c.URL, q)
		resps <- result{resp, err}
	}()
	select {
	case resp := <-resps:
		return resp.Response, resp.Err
	case <-ctx.Done():
		return nil, chronograf.ErrUpstreamTimeout
	}
}
// Connect caches the URL and optional Bearer Authorization for the data source
func (c *Client) Connect(ctx context.Context, src *chronograf.Source) error {
	span, _ := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	parsed, err := url.Parse(src.URL)
	if err != nil {
		return err
	}
	c.Authorizer = DefaultAuthorization(src)
	// Accepting all certs is honored only when the scheme is https AND the
	// user explicitly opted in on the source.
	if src.InsecureSkipVerify && parsed.Scheme == "https" {
		c.InsecureSkipVerify = src.InsecureSkipVerify
	}
	c.URL = parsed
	return nil
}
// Users transforms InfluxDB into a user store
func (c *Client) Users(ctx context.Context) chronograf.UsersStore {
	// The client itself implements chronograf.UsersStore.
	return c
}
// Roles aren't supported in OSS InfluxDB; this always returns an error.
func (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) {
	// Fixed grammar in the user-facing message ("not support"/"are support").
	return nil, fmt.Errorf("roles not supported in open-source InfluxDB. Roles are supported in Influx Enterprise")
}
// Ping hits the influxdb ping endpoint and reports whether the instance is
// reachable; the version and type strings are discarded.
func (c *Client) Ping(ctx context.Context) error {
	_, _, err := c.pingTimeout(ctx)
	return err
}
// Version hits the influxdb ping endpoint and returns the version of influx
// (taken from the ping response headers).
func (c *Client) Version(ctx context.Context) (string, error) {
	version, _, err := c.pingTimeout(ctx)
	return version, err
}
// Type hits the influxdb ping endpoint and returns the type of influx running
// (InfluxDB, Enterprise, or Relay — see ping for the detection rules).
func (c *Client) Type(ctx context.Context) (string, error) {
	_, tsdbType, err := c.pingTimeout(ctx)
	return tsdbType, err
}
// pingTimeout runs ping in a goroutine so the caller can abandon it when
// ctx is cancelled, returning (version, type, error).
func (c *Client) pingTimeout(ctx context.Context) (string, string, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()
	// Buffered so the pinging goroutine can deliver its result and exit
	// even if we return early on ctx.Done(); the unbuffered original
	// leaked the goroutine whenever the context fired first.
	resps := make(chan pingResult, 1)
	go func() {
		version, tsdbType, err := c.ping(ctx, c.URL)
		resps <- pingResult{version, tsdbType, err}
	}()
	select {
	case resp := <-resps:
		return resp.Version, resp.Type, resp.Err
	case <-ctx.Done():
		return "", "", chronograf.ErrUpstreamTimeout
	}
}
// pingResult carries the outcome of a ping across a channel.
type pingResult struct {
	Version string // value of the X-Influxdb-Build/Version headers
	Type    string // one of the chronograf.Influx* type constants
	Err     error
}
// ping GETs the /ping endpoint at u and classifies the server from its
// response headers, returning (version, influx type, error). Any status
// other than 204 No Content is an error whose message is the raw body.
func (c *Client) ping(ctx context.Context, u *url.URL) (string, string, error) {
	span, _ := tracing.StartSpanFromContext(ctx)
	defer span.Finish()
	u.Path = "ping"
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return "", "", err
	}
	tracing.InjectToHTTPRequest(span, req)
	hc := &http.Client{}
	if c.InsecureSkipVerify {
		hc.Transport = skipVerifyTransport
	} else {
		hc.Transport = defaultTransport
	}
	resp, err := hc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", "", err
	}
	if resp.StatusCode != http.StatusNoContent {
		// errors.New, not fmt.Errorf: the body is arbitrary server text,
		// not a format string; fmt.Errorf(string(body)) would mangle any
		// '%' characters the server returned (and fails go vet).
		return "", "", errors.New(string(body))
	}
	// Enterprise builds identify themselves via X-Influxdb-Build.
	version := resp.Header.Get("X-Influxdb-Build")
	if version == "ENT" {
		return version, chronograf.InfluxEnterprise, nil
	}
	// Otherwise classify from the version string: "-c" marks clustered
	// (enterprise) builds and "relay" marks influxdb-relay.
	version = resp.Header.Get("X-Influxdb-Version")
	if strings.Contains(version, "-c") {
		return version, chronograf.InfluxEnterprise, nil
	} else if strings.Contains(version, "relay") {
		return version, chronograf.InfluxRelay, nil
	}
	return version, chronograf.InfluxDB, nil
}
// Write POSTs line protocol to a database and retention policy, one point
// at a time, stopping at the first failure.
func (c *Client) Write(ctx context.Context, points []chronograf.Point) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()
	for i := range points {
		if err := c.writePoint(ctx, &points[i]); err != nil {
			return err
		}
	}
	return nil
}
// writePoint serializes and writes a single point, ignoring benign
// hinted-handoff noise and transparently recreating a missing database
// before retrying the write once.
func (c *Client) writePoint(ctx context.Context, point *chronograf.Point) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	lp, err := toLineProtocol(point)
	if err != nil {
		return err
	}

	writeErr := c.write(ctx, c.URL, point.Database, point.RetentionPolicy, lp)
	switch {
	case writeErr == nil:
		return nil
	case strings.Contains(writeErr.Error(), "hinted handoff queue not empty"):
		// Informational message from influx, not a real failure.
		return nil
	case strings.Contains(writeErr.Error(), "database not found"):
		// Recreate the missing database, then retry the write once.
		if _, err := c.CreateDB(ctx, &chronograf.Database{Name: point.Database}); err != nil {
			return err
		}
		return c.write(ctx, c.URL, point.Database, point.RetentionPolicy, lp)
	default:
		return writeErr
	}
}
// write POSTs raw line protocol lp to db/rp on the instance at u. Only a
// 204 No Content response counts as success; the request is abandoned when
// ctx is cancelled. Note it mutates u.Path (preserved original behavior).
func (c *Client) write(ctx context.Context, u *url.URL, db, rp, lp string) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()
	u.Path = "write"
	req, err := http.NewRequest("POST", u.String(), strings.NewReader(lp))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
	if c.Authorizer != nil {
		if err := c.Authorizer.Set(req); err != nil {
			return err
		}
	}
	params := req.URL.Query()
	params.Set("db", db)
	params.Set("rp", rp)
	req.URL.RawQuery = params.Encode()
	tracing.InjectToHTTPRequest(span, req)
	hc := &http.Client{}
	if c.InsecureSkipVerify {
		hc.Transport = skipVerifyTransport
	} else {
		hc.Transport = defaultTransport
	}
	// Buffered so the sending goroutine can deliver its error and exit
	// even if we bail out on ctx.Done(); the unbuffered original leaked
	// the goroutine (blocked forever on send) whenever ctx fired first.
	errChan := make(chan error, 1)
	go func() {
		resp, err := hc.Do(req)
		if err != nil {
			errChan <- err
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode == http.StatusNoContent {
			errChan <- nil
			return
		}
		var response Response
		dec := json.NewDecoder(resp.Body)
		err = dec.Decode(&response)
		// An empty body (EOF) is tolerated; any other decode error wins.
		if err != nil && err.Error() != "EOF" {
			errChan <- err
			return
		}
		errChan <- errors.New(response.Err)
	}()
	select {
	case err := <-errChan:
		return err
	case <-ctx.Done():
		return chronograf.ErrUpstreamTimeout
	}
}

View File

@ -1,538 +0,0 @@
package influx_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
gojwt "github.com/golang-jwt/jwt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// NewClient initializes an HTTP Client for InfluxDB.
func NewClient(host string, lg chronograf.Logger) (*influx.Client, error) {
	logger := lg.WithField("host", host)
	parsed, err := url.Parse(host)
	if err != nil {
		logger.Error("Error initialize influx client: err:", err)
		return nil, err
	}
	return &influx.Client{URL: parsed, Logger: logger}, nil
}
// Test_Influx_MakesRequestsToQueryEndpoint verifies a Query lands on /query.
func Test_Influx_MakesRequestsToQueryEndpoint(t *testing.T) {
	t.Parallel()
	reached := false
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		rw.Write([]byte(`{}`))
		reached = true
		if path := r.URL.Path; path != "/query" {
			t.Error("Expected the path to contain `/query` but was", path)
		}
	}))
	defer ts.Close()

	var series chronograf.TimeSeries
	series, err := NewClient(ts.URL, &chronograf.NoopLogger{})
	if err != nil {
		t.Fatal("Unexpected error initializing client: err:", err)
	}
	if _, err = series.Query(context.Background(), chronograf.Query{Command: "show databases"}); err != nil {
		t.Fatal("Expected no error but was", err)
	}
	if !reached {
		t.Error("Expected http request to Influx but there was none")
	}
}
// MockAuthorization is a test double for influx.Authorizer whose Set
// returns a canned error and never modifies the request.
type MockAuthorization struct {
	Bearer string // unused by Set; retained for fixture completeness
	Error  error  // returned verbatim from Set
}

// Set implements influx.Authorizer by returning the configured error.
func (m *MockAuthorization) Set(req *http.Request) error {
	return m.Error
}
// Test_Influx_AuthorizationBearer verifies that Connect with a shared
// secret causes queries to carry a JWT bearer token signed with that
// secret and containing the source username as the "username" claim.
func Test_Influx_AuthorizationBearer(t *testing.T) {
	t.Parallel()
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		rw.Write([]byte(`{}`))
		// Extract the token from "Bearer <token>" and verify it against
		// the shared secret "42" used below.
		auth := r.Header.Get("Authorization")
		tokenString := strings.Split(auth, " ")[1]
		token, err := gojwt.Parse(tokenString, func(token *gojwt.Token) (interface{}, error) {
			if _, ok := token.Method.(*gojwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
			}
			return []byte("42"), nil
		})
		if err != nil {
			t.Errorf("Invalid token %v", err)
		}
		if claims, ok := token.Claims.(gojwt.MapClaims); ok && token.Valid {
			got := claims["username"]
			want := "AzureDiamond"
			if got != want {
				t.Errorf("Test_Influx_AuthorizationBearer got %s want %s", got, want)
			}
			return
		}
		t.Errorf("Invalid token %v", token)
	}))
	defer ts.Close()
	src := &chronograf.Source{
		Username:     "AzureDiamond",
		URL:          ts.URL,
		SharedSecret: "42",
	}
	series := &influx.Client{
		Logger: &chronograf.NoopLogger{},
	}
	series.Connect(context.Background(), src)
	query := chronograf.Query{
		Command: "show databases",
	}
	_, err := series.Query(context.Background(), query)
	if err != nil {
		t.Fatal("Expected no error but was", err)
	}
}
// Test_Influx_AuthorizationBearerCtx is the claims-focused variant of the
// bearer test: it parses the token with ParseWithClaims and additionally
// checks validity/expiry of the claims themselves.
func Test_Influx_AuthorizationBearerCtx(t *testing.T) {
	t.Parallel()
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		rw.Write([]byte(`{}`))
		got := r.Header.Get("Authorization")
		if got == "" {
			t.Error("Test_Influx_AuthorizationBearerCtx got empty string")
		}
		// "Bearer <token>" — keep only the token part.
		incomingToken := strings.Split(got, " ")[1]
		// Keyfunc: enforce HMAC signing and supply the shared secret.
		alg := func(token *gojwt.Token) (interface{}, error) {
			if _, ok := token.Method.(*gojwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
			}
			return []byte("hunter2"), nil
		}
		claims := &gojwt.MapClaims{}
		token, err := gojwt.ParseWithClaims(string(incomingToken), claims, alg)
		if err != nil {
			t.Errorf("Test_Influx_AuthorizationBearerCtx unexpected claims error %v", err)
		}
		if !token.Valid {
			t.Error("Test_Influx_AuthorizationBearerCtx unexpected valid claim")
		}
		if err := claims.Valid(); err != nil {
			t.Errorf("Test_Influx_AuthorizationBearerCtx not expires already %v", err)
		}
		user := (*claims)["username"].(string)
		if user != "AzureDiamond" {
			t.Errorf("Test_Influx_AuthorizationBearerCtx expected username AzureDiamond but got %s", user)
		}
	}))
	defer ts.Close()
	series := &influx.Client{
		Logger: &chronograf.NoopLogger{},
	}
	err := series.Connect(context.Background(), &chronograf.Source{
		Username:           "AzureDiamond",
		SharedSecret:       "hunter2",
		URL:                ts.URL,
		InsecureSkipVerify: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	query := chronograf.Query{
		Command: "show databases",
	}
	_, err = series.Query(context.Background(), query)
	if err != nil {
		t.Fatal("Expected no error but was", err)
	}
}
// Test_Influx_AuthorizationBearerFailure ensures an Authorizer error
// surfaces from Query before any request is made.
func Test_Influx_AuthorizationBearerFailure(t *testing.T) {
	t.Parallel()
	failing := &MockAuthorization{Error: fmt.Errorf("cracked1337")}
	u, _ := url.Parse("http://haxored.net")
	u.User = url.UserPassword("AzureDiamond", "hunter2")
	series := &influx.Client{
		URL:        u,
		Authorizer: failing,
		Logger:     &chronograf.NoopLogger{},
	}
	if _, err := series.Query(context.Background(), chronograf.Query{Command: "show databases"}); err == nil {
		t.Fatal("Test_Influx_AuthorizationBearerFailure Expected error but received nil")
	}
}
// Test_Influx_HTTPS_Failure verifies that a TLS server with an untrusted
// certificate is rejected when InsecureSkipVerify is not set.
func Test_Influx_HTTPS_Failure(t *testing.T) {
	reached := false
	ts := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		reached = true
	}))
	defer ts.Close()
	ctx := context.Background()

	var series chronograf.TimeSeries
	series, err := NewClient(ts.URL, &chronograf.NoopLogger{})
	if err != nil {
		t.Fatal("Unexpected error initializing client: err:", err)
	}
	if err := series.Connect(ctx, &chronograf.Source{URL: ts.URL}); err != nil {
		t.Fatal("Unexpected error connecting to client: err:", err)
	}
	if _, err = series.Query(ctx, chronograf.Query{Command: "show databases"}); err == nil {
		t.Error("Expected error but was successful")
	}
	if reached {
		t.Error("Expected http request to fail, but, succeeded")
	}
}
// Test_Influx_HTTPS_InsecureSkipVerify verifies that opting into
// InsecureSkipVerify makes queries succeed against a self-signed TLS
// server, and that the query text reaches the server unmodified.
func Test_Influx_HTTPS_InsecureSkipVerify(t *testing.T) {
	t.Parallel()
	called := false
	q := ""
	ts := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		rw.Write([]byte(`{}`))
		called = true
		if path := r.URL.Path; path != "/query" {
			t.Error("Expected the path to contain `/query` but was", path)
		}
		// Capture the q parameter so the second phase can assert on it.
		values := r.URL.Query()
		q = values.Get("q")
	}))
	defer ts.Close()
	ctx := context.Background()
	var series chronograf.TimeSeries
	series, err := NewClient(ts.URL, &chronograf.NoopLogger{})
	if err != nil {
		t.Fatal("Unexpected error initializing client: err:", err)
	}
	src := chronograf.Source{
		URL:                ts.URL,
		InsecureSkipVerify: true,
	}
	if err := series.Connect(ctx, &src); err != nil {
		t.Fatal("Unexpected error connecting to client: err:", err)
	}
	query := chronograf.Query{
		Command: "show databases",
	}
	_, err = series.Query(ctx, query)
	if err != nil {
		t.Fatal("Expected no error but was", err)
	}
	if !called {
		t.Error("Expected http request to Influx but there was none")
	}
	// Second phase: reset the probes and confirm the exact query string
	// is forwarded verbatim.
	called = false
	q = ""
	query = chronograf.Query{
		Command: `select "usage_user" from cpu`,
	}
	_, err = series.Query(ctx, query)
	if err != nil {
		t.Fatal("Expected no error but was", err)
	}
	if !called {
		t.Error("Expected http request to Influx but there was none")
	}
	if q != `select "usage_user" from cpu` {
		t.Errorf("Unexpected query: %s", q)
	}
}
// Test_Influx_CancelsInFlightRequests verifies that cancelling the context
// while the server is mid-request makes Query return ErrUpstreamTimeout.
// The started/finished channels sequence the cancellation precisely between
// the handler beginning and ending its work.
func Test_Influx_CancelsInFlightRequests(t *testing.T) {
	t.Parallel()
	started := make(chan bool, 1)
	finished := make(chan bool, 1)
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		started <- true
		time.Sleep(20 * time.Millisecond)
		finished <- true
	}))
	defer func() {
		ts.CloseClientConnections()
		ts.Close()
	}()
	series, _ := NewClient(ts.URL, &chronograf.NoopLogger{})
	ctx, cancel := context.WithCancel(context.Background())
	errs := make(chan (error))
	go func() {
		query := chronograf.Query{
			Command: "show databases",
		}
		_, err := series.Query(ctx, query)
		errs <- err
	}()
	// Generous deadline guard so a wedged server fails the test instead of
	// hanging it.
	timer := time.NewTimer(10 * time.Second)
	defer timer.Stop()
	select {
	case s := <-started:
		if !s {
			t.Errorf("Expected cancellation during request processing. Started: %t", s)
		}
	case <-timer.C:
		t.Fatalf("Expected server to finish")
	}
	// Cancel while the handler is sleeping — i.e. while the request is in flight.
	cancel()
	select {
	case f := <-finished:
		if !f {
			t.Errorf("Expected cancellation during request processing. Finished: %t", f)
		}
	case <-timer.C:
		t.Fatalf("Expected server to finish")
	}
	err := <-errs
	if err != chronograf.ErrUpstreamTimeout {
		t.Error("Expected timeout error but wasn't. err was", err)
	}
}
// Test_Influx_RejectsInvalidHosts verifies that an unparseable host string
// fails client construction.
func Test_Influx_RejectsInvalidHosts(t *testing.T) {
	if _, err := NewClient(":", &chronograf.NoopLogger{}); err == nil {
		t.Fatal("Expected err but was nil")
	}
}
// Test_Influx_ReportsInfluxErrs verifies that a 200 response with an empty
// (undecodable) body is surfaced as an error from Query.
func Test_Influx_ReportsInfluxErrs(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	cl, err := NewClient(ts.URL, &chronograf.NoopLogger{})
	if err != nil {
		t.Fatal("Encountered unexpected error while initializing influx client: err:", err)
	}
	q := chronograf.Query{
		Command: "show shards",
		DB:      "_internal",
		RP:      "autogen",
	}
	if _, err = cl.Query(context.Background(), q); err == nil {
		t.Fatal("Expected an error but received none")
	}
}
// TestClient_Roles confirms Roles always errors for an OSS client.
func TestClient_Roles(t *testing.T) {
	var c influx.Client
	if _, err := c.Roles(context.Background()); err == nil {
		t.Errorf("Client.Roles() want error")
	}
}
// TestClient_write is a table-driven test of Client.Write covering: a clean
// write, a point with no fields (error), the hinted-handoff message being
// ignored, database-not-found triggering a create-and-retry, and an
// arbitrary server error propagating to the caller.
func TestClient_write(t *testing.T) {
	type fields struct {
		Authorizer         influx.Authorizer
		InsecureSkipVerify bool
		Logger             chronograf.Logger
	}
	type args struct {
		ctx   context.Context
		point chronograf.Point
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		body    string // error JSON the fake server returns on the FIRST /write
		wantErr bool
	}{
		{
			name: "write point to influxdb",
			fields: fields{
				Logger: mocks.NewLogger(),
			},
			args: args{
				ctx: context.Background(),
				point: chronograf.Point{
					Database:        "mydb",
					RetentionPolicy: "myrp",
					Measurement:     "mymeas",
					Time:            10,
					Tags: map[string]string{
						"tag1": "value1",
						"tag2": "value2",
					},
					Fields: map[string]interface{}{
						"field1": "value1",
					},
				},
			},
		},
		{
			name: "point without fields",
			args: args{
				ctx:   context.Background(),
				point: chronograf.Point{},
			},
			wantErr: true,
		},
		{
			name: "hinted handoff errors are not errors really.",
			fields: fields{
				Logger: mocks.NewLogger(),
			},
			args: args{
				ctx: context.Background(),
				point: chronograf.Point{
					Database:        "mydb",
					RetentionPolicy: "myrp",
					Measurement:     "mymeas",
					Time:            10,
					Tags: map[string]string{
						"tag1": "value1",
						"tag2": "value2",
					},
					Fields: map[string]interface{}{
						"field1": "value1",
					},
				},
			},
			body: `{"error":"hinted handoff queue not empty"}`,
		},
		{
			name: "database not found creates a new db",
			fields: fields{
				Logger: mocks.NewLogger(),
			},
			args: args{
				ctx: context.Background(),
				point: chronograf.Point{
					Database:        "mydb",
					RetentionPolicy: "myrp",
					Measurement:     "mymeas",
					Time:            10,
					Tags: map[string]string{
						"tag1": "value1",
						"tag2": "value2",
					},
					Fields: map[string]interface{}{
						"field1": "value1",
					},
				},
			},
			body: `{"error":"database not found"}`,
		},
		{
			name: "error from database reported",
			fields: fields{
				Logger: mocks.NewLogger(),
			},
			args: args{
				ctx: context.Background(),
				point: chronograf.Point{
					Database:        "mydb",
					RetentionPolicy: "myrp",
					Measurement:     "mymeas",
					Time:            10,
					Tags: map[string]string{
						"tag1": "value1",
						"tag2": "value2",
					},
					Fields: map[string]interface{}{
						"field1": "value1",
					},
				},
			},
			body: `{"error":"oh no!"}`,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			retry := 0 // if the retry is > 0 then we don't error
			ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
				// First /write returns the canned error body; any retried
				// write (retry > 0) succeeds, modeling create-db-then-retry.
				if strings.HasPrefix(r.RequestURI, "/write") {
					if tt.body == "" || retry > 0 {
						rw.WriteHeader(http.StatusNoContent)
						return
					}
					retry++
					rw.WriteHeader(http.StatusBadRequest)
					rw.Write([]byte(tt.body))
					return
				}
				// Non-write requests (e.g. CREATE DATABASE query) succeed.
				rw.WriteHeader(http.StatusOK)
				rw.Write([]byte(`{"results":[{}]}`))
			}))
			defer ts.Close()
			u, _ := url.Parse(ts.URL)
			c := &influx.Client{
				URL:                u,
				Authorizer:         tt.fields.Authorizer,
				InsecureSkipVerify: tt.fields.InsecureSkipVerify,
				Logger:             tt.fields.Logger,
			}
			if err := c.Write(tt.args.ctx, []chronograf.Point{tt.args.point}); (err != nil) != tt.wantErr {
				t.Errorf("Client.write() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@ -1,84 +0,0 @@
package influx
import (
"fmt"
"sort"
"strings"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Replacers implementing InfluxDB line-protocol escaping. Each runs in a
// single pass, so replacements are not re-escaped.
var (
	// escapeMeasurement escapes commas and spaces in measurement names.
	escapeMeasurement = strings.NewReplacer(
		`,` /* to */, `\,`,
		` ` /* to */, `\ `,
	)
	// escapeKeys escapes comma, quote, space, and equals in tag/field keys.
	escapeKeys = strings.NewReplacer(
		`,` /* to */, `\,`,
		`"` /* to */, `\"`,
		` ` /* to */, `\ `,
		`=` /* to */, `\=`,
	)
	// escapeTagValues escapes the same characters in tag values.
	escapeTagValues = strings.NewReplacer(
		`,` /* to */, `\,`,
		`"` /* to */, `\"`,
		` ` /* to */, `\ `,
		`=` /* to */, `\=`,
	)
	// escapeFieldStrings escapes quotes and backslashes in string field values.
	escapeFieldStrings = strings.NewReplacer(
		`"` /* to */, `\"`,
		`\` /* to */, `\\`,
	)
)
// toLineProtocol serializes point into InfluxDB line protocol:
// measurement[,tags] fields[ timestamp]. Tags with empty values are
// dropped, tag and field sets are emitted in sorted order, and fields of
// unsupported types are silently skipped.
func toLineProtocol(point *chronograf.Point) (string, error) {
	measurement := escapeMeasurement.Replace(point.Measurement)
	if measurement == "" {
		return "", fmt.Errorf("measurement required to write point")
	}
	if len(point.Fields) == 0 {
		return "", fmt.Errorf("at least one field required to write point")
	}

	tags := make([]string, 0, len(point.Tags))
	for key, val := range point.Tags {
		if val == "" {
			continue // tags with empty values are omitted entirely
		}
		tags = append(tags, escapeKeys.Replace(key)+"="+escapeTagValues.Replace(val))
	}
	// it is faster to insert data into influx db if the tags are sorted
	sort.Strings(tags)

	fields := make([]string, 0, len(point.Fields))
	for key, val := range point.Fields {
		var piece string
		switch v := val.(type) {
		case int64, int32, int16, int8, int:
			piece = fmt.Sprintf("%s=%di", escapeKeys.Replace(key), v)
		case uint64, uint32, uint16, uint8, uint:
			piece = fmt.Sprintf("%s=%du", escapeKeys.Replace(key), v)
		case float64, float32:
			piece = fmt.Sprintf("%s=%f", escapeKeys.Replace(key), v)
		case string:
			piece = fmt.Sprintf(`%s="%s"`, escapeKeys.Replace(key), escapeFieldStrings.Replace(v))
		case bool:
			piece = fmt.Sprintf("%s=%t", escapeKeys.Replace(key), v)
		}
		// piece stays empty for unsupported field types, which are dropped.
		if piece != "" {
			fields = append(fields, piece)
		}
	}
	sort.Strings(fields)

	var sb strings.Builder
	sb.WriteString(measurement)
	if len(tags) > 0 {
		sb.WriteString(",")
		sb.WriteString(strings.Join(tags, ","))
	}
	sb.WriteString(" ")
	sb.WriteString(strings.Join(fields, ","))
	if point.Time != 0 {
		sb.WriteString(fmt.Sprintf(" %d", point.Time))
	}
	return sb.String(), nil
}

View File

@ -1,129 +0,0 @@
package influx
import (
"testing"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Test_toLineProtocol is a table-driven test of line-protocol serialization:
// required measurement/fields, per-type field formatting, sorted tag/field
// ordering, timestamp emission, and comma/space/quote/equals escaping.
func Test_toLineProtocol(t *testing.T) {
	tests := []struct {
		name    string
		point   *chronograf.Point
		want    string
		wantErr bool
	}{
		0: {
			name:    "requires a measurement",
			point:   &chronograf.Point{},
			wantErr: true,
		},
		1: {
			name: "requires at least one field",
			point: &chronograf.Point{
				Measurement: "telegraf",
			},
			wantErr: true,
		},
		2: {
			name: "no tags produces line protocol",
			point: &chronograf.Point{
				Measurement: "telegraf",
				Fields: map[string]interface{}{
					"myfield": 1,
				},
			},
			want: "telegraf myfield=1i",
		},
		3: {
			// Also checks that an unsupported field type (time.Time) is dropped.
			name: "test all influx data types",
			point: &chronograf.Point{
				Measurement: "telegraf",
				Fields: map[string]interface{}{
					"int":          19,
					"uint":         uint(85),
					"float":        88.0,
					"string":       "mph",
					"time_machine": true,
					"invalidField": time.Time{},
				},
			},
			want: `telegraf float=88.000000,int=19i,string="mph",time_machine=true,uint=85u`,
		},
		4: {
			// Same as above but with tags and an explicit timestamp.
			name: "test all influx data types",
			point: &chronograf.Point{
				Measurement: "telegraf",
				Tags: map[string]string{
					"marty": "mcfly",
					"doc":   "brown",
				},
				Fields: map[string]interface{}{
					"int":          19,
					"uint":         uint(85),
					"float":        88.0,
					"string":       "mph",
					"time_machine": true,
					"invalidField": time.Time{},
				},
				Time: 497115501000000000,
			},
			want: `telegraf,doc=brown,marty=mcfly float=88.000000,int=19i,string="mph",time_machine=true,uint=85u 497115501000000000`,
		},
		5: {
			name: "measurements with comma or spaces are escaped",
			point: &chronograf.Point{
				Measurement: "O Romeo, Romeo, wherefore art thou Romeo",
				Tags: map[string]string{
					"part": "JULIET",
				},
				Fields: map[string]interface{}{
					"act":   2,
					"scene": 2,
					"page":  2,
					"line":  33,
				},
			},
			want: `O\ Romeo\,\ Romeo\,\ wherefore\ art\ thou\ Romeo,part=JULIET act=2i,line=33i,page=2i,scene=2i`,
		},
		6: {
			name: "tags with comma, quota, space, equal are escaped",
			point: &chronograf.Point{
				Measurement: "quotes",
				Tags: map[string]string{
					"comma,": "comma,",
					`quote"`: `quote"`,
					"space ": `space "`,
					"equal=": "equal=",
				},
				Fields: map[string]interface{}{
					"myfield": 1,
				},
			},
			want: `quotes,comma\,=comma\,,equal\==equal\=,quote\"=quote\",space\ =space\ \" myfield=1i`,
		},
		7: {
			name: "fields with quotes or backslashes are escaped",
			point: &chronograf.Point{
				Measurement: "quotes",
				Fields: map[string]interface{}{
					`quote"\`: `quote"\`,
				},
			},
			want: `quotes quote\"\="quote\"\\"`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := toLineProtocol(tt.point)
			if (err != nil) != tt.wantErr {
				t.Errorf("toLineProtocol() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("toLineProtocol() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -1,6 +0,0 @@
package influx
import "time"
// Now is a function returning the current time; it exists so callers can
// substitute a fixed clock (e.g. in tests) for time.Now.
type Now func() time.Time

View File

@ -1,278 +0,0 @@
package influx
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Allowance sets and privilege strings used to translate between InfluxQL
// grants and chronograf permissions.
var (
	// AllowAllDB means a user gets both read and write permissions for a db
	AllowAllDB = chronograf.Allowances{"WRITE", "READ"}
	// AllowAllAdmin means a user gets both read and write permissions for an admin
	AllowAllAdmin = chronograf.Allowances{"ALL"}
	// AllowRead means a user is only able to read the database.
	AllowRead = chronograf.Allowances{"READ"}
	// AllowWrite means a user is able to only write to the database
	AllowWrite = chronograf.Allowances{"WRITE"}
	// NoPrivileges occasionally shows up as a response for a users grants.
	NoPrivileges = "NO PRIVILEGES"
	// AllPrivileges means that a user has both read and write perms
	AllPrivileges = "ALL PRIVILEGES"
	// All means a user has both read and write perms. Alternative to AllPrivileges
	All = "ALL"
	// Read means a user can read a database
	Read = "READ"
	// Write means a user can write to a database
	Write = "WRITE"
)
// Permissions return just READ and WRITE for OSS Influx
func (c *Client) Permissions(context.Context) chronograf.Permissions {
	// OSS has no fine-grained roles: admin scope gets ALL, db scope gets
	// READ+WRITE.
	return chronograf.Permissions{
		{
			Scope:   chronograf.AllScope,
			Allowed: AllowAllAdmin,
		},
		{
			Scope:   chronograf.DBScope,
			Allowed: AllowAllDB,
		},
	}
}
// showResults is used to deserialize InfluxQL SHOW command responses; each
// element is one statement result, with rows in Series[i].Values.
type showResults []struct {
	Series []struct {
		Values [][]interface{} `json:"values"`
	} `json:"series"`
}
// Users converts SHOW USERS results into chronograf Users. Rows whose name
// or admin columns have unexpected types are skipped; admins receive the
// full admin permission set.
func (r *showResults) Users() []chronograf.User {
	res := []chronograf.User{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				name, ok := row[0].(string)
				if !ok {
					continue
				}
				admin, ok := row[1].(bool)
				if !ok {
					continue
				}
				user := chronograf.User{
					Name:        name,
					Permissions: chronograf.Permissions{},
				}
				if admin {
					user.Permissions = adminPerms()
				}
				res = append(res, user)
			}
		}
	}
	return res
}
// Databases converts SHOW DATABASES results into chronograf Databases,
// skipping rows whose name column is not a string.
func (r *showResults) Databases() []chronograf.Database {
	res := []chronograf.Database{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				if name, ok := row[0].(string); ok {
					res = append(res, chronograf.Database{Name: name})
				}
			}
		}
	}
	return res
}
// RetentionPolicies converts SHOW RETENTION POLICIES results into
// chronograf RetentionPolicies, skipping rows with unexpected column types.
func (r *showResults) RetentionPolicies() []chronograf.RetentionPolicy {
	res := []chronograf.RetentionPolicy{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				name, ok := row[0].(string)
				if !ok {
					continue
				}
				duration, ok := row[1].(string)
				if !ok {
					continue
				}
				sduration, ok := row[2].(string)
				if !ok {
					continue
				}
				// JSON numbers decode as float64; narrow to int32 below.
				replication, ok := row[3].(float64)
				if !ok {
					continue
				}
				def, ok := row[4].(bool)
				if !ok {
					continue
				}
				res = append(res, chronograf.RetentionPolicy{
					Name:          name,
					Duration:      duration,
					ShardDuration: sduration,
					Replication:   int32(replication),
					Default:       def,
				})
			}
		}
	}
	return res
}
// Measurements converts SHOW MEASUREMENTS results into chronograf
// Measurements, skipping rows whose name column is not a string.
func (r *showResults) Measurements() []chronograf.Measurement {
	res := []chronograf.Measurement{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				if name, ok := row[0].(string); ok {
					res = append(res, chronograf.Measurement{Name: name})
				}
			}
		}
	}
	return res
}
// Permissions converts SHOW GRANTS results into chronograf.Permissions,
// mapping influx privilege strings onto allowance sets and dropping rows
// with unknown privileges.
func (r *showResults) Permissions() chronograf.Permissions {
	res := []chronograf.Permission{}
	for _, result := range *r {
		for _, series := range result.Series {
			for _, row := range series.Values {
				db, ok := row[0].(string)
				if !ok {
					continue
				}
				priv, ok := row[1].(string)
				if !ok {
					continue
				}
				perm := chronograf.Permission{
					Name:  db,
					Scope: chronograf.DBScope,
				}
				switch priv {
				case AllPrivileges, All:
					perm.Allowed = AllowAllDB
				case Read:
					perm.Allowed = AllowRead
				case Write:
					perm.Allowed = AllowWrite
				default:
					// sometimes influx reports back NO PRIVILEGES
					continue
				}
				res = append(res, perm)
			}
		}
	}
	return res
}
// adminPerms is the permission set given to admin users: ALL on every scope.
func adminPerms() chronograf.Permissions {
	return []chronograf.Permission{
		{
			Scope:   chronograf.AllScope,
			Allowed: AllowAllAdmin,
		},
	}
}
// ToInfluxQL converts the permission into an InfluxQL statement of the
// form `<action> <privilege> [ON "db"] <preposition> "username"`, or ""
// when the allowances reduce to no privileges.
func ToInfluxQL(action, preposition, username string, perm chronograf.Permission) string {
	switch {
	case perm.Scope == chronograf.AllScope:
		return fmt.Sprintf(`%s ALL PRIVILEGES %s "%s"`, action, preposition, username)
	case len(perm.Allowed) == 0:
		// All privileges are to be removed for this user on this database
		return fmt.Sprintf(`%s ALL PRIVILEGES ON "%s" %s "%s"`, action, perm.Name, preposition, username)
	}
	priv := ToPriv(perm.Allowed)
	if priv == NoPrivileges {
		return ""
	}
	return fmt.Sprintf(`%s %s ON "%s" %s "%s"`, action, priv, perm.Name, preposition, username)
}
// ToRevoke converts the permission into an InfluxQL revoke statement
// (REVOKE ... FROM "username").
func ToRevoke(username string, perm chronograf.Permission) string {
	return ToInfluxQL("REVOKE", "FROM", username, perm)
}
// ToGrant converts the permission into an InfluxQL grant statement
// (GRANT ... TO "username"); an empty allowance list yields no statement.
func ToGrant(username string, perm chronograf.Permission) string {
	if len(perm.Allowed) == 0 {
		return ""
	}
	return ToInfluxQL("GRANT", "TO", username, perm)
}
// ToPriv collapses a chronograf allowance list into a single InfluxQL
// privilege string: ALL, WRITE, READ, or NO PRIVILEGES.
func ToPriv(a chronograf.Allowances) string {
	if len(a) == 0 {
		return NoPrivileges
	}
	var canRead, canWrite bool
	for _, allowance := range a {
		switch allowance {
		case Read:
			canRead = true
		case Write:
			canWrite = true
		case All:
			canRead, canWrite = true, true
		}
	}
	switch {
	case canRead && canWrite:
		return All
	case canWrite:
		return Write
	case canRead:
		return Read
	}
	return NoPrivileges
}
// Difference compares two permission sets and returns a set to be revoked
// and a set to be added so that haves becomes wants. A wanted permission
// with an empty allowance list is treated as a revocation; wanted
// permissions found in haves are (re)granted even when unchanged.
func Difference(wants chronograf.Permissions, haves chronograf.Permissions) (revoke chronograf.Permissions, add chronograf.Permissions) {
	same := func(a, b chronograf.Permission) bool {
		return a.Scope == b.Scope && a.Name == b.Name
	}
	// Classify every wanted permission: matched-but-empty -> revoke,
	// matched-with-allowances or unmatched -> add.
	for _, want := range wants {
		found := false
		for _, got := range haves {
			if !same(want, got) {
				continue
			}
			found = true
			if len(want.Allowed) == 0 {
				revoke = append(revoke, want)
			} else {
				add = append(add, want)
			}
			break
		}
		if !found {
			add = append(add, want)
		}
	}
	// Anything currently held but no longer wanted is revoked outright.
	for _, got := range haves {
		found := false
		for _, want := range wants {
			if same(want, got) {
				found = true
				break
			}
		}
		if !found {
			revoke = append(revoke, got)
		}
	}
	return revoke, add
}

View File

@ -1,422 +0,0 @@
package influx
import (
"encoding/json"
"reflect"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestDifference exercises Difference's four interesting shapes: widening an
// existing permission, narrowing one, emptying one (revoke), and moving a
// permission to a different database. Note that a narrowed-but-nonempty
// permission is reported as an "add" (a re-grant), not a revoke.
func TestDifference(t *testing.T) {
	t.Parallel()
	type args struct {
		wants chronograf.Permissions
		haves chronograf.Permissions
	}
	tests := []struct {
		name       string
		args       args
		wantRevoke chronograf.Permissions
		wantAdd    chronograf.Permissions
	}{
		{
			name: "add write to permissions",
			args: args{
				wants: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "tensorflowdb",
						Allowed: []string{"READ", "WRITE"},
					},
				},
				haves: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "tensorflowdb",
						Allowed: []string{"READ"},
					},
				},
			},
			wantRevoke: nil,
			wantAdd: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{"READ", "WRITE"},
				},
			},
		},
		{
			// Narrowing READ+WRITE down to READ is expressed as an "add"
			// (re-grant of the narrower set), not a revoke.
			name: "revoke write to permissions",
			args: args{
				wants: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "tensorflowdb",
						Allowed: []string{"READ"},
					},
				},
				haves: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "tensorflowdb",
						Allowed: []string{"READ", "WRITE"},
					},
				},
			},
			wantRevoke: nil,
			wantAdd: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{"READ"},
				},
			},
		},
		{
			// An empty allowance list on an existing target means revoke.
			name: "revoke all permissions",
			args: args{
				wants: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "tensorflowdb",
						Allowed: []string{},
					},
				},
				haves: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "tensorflowdb",
						Allowed: []string{"READ", "WRITE"},
					},
				},
			},
			wantRevoke: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{},
				},
			},
			wantAdd: nil,
		},
		{
			// Different databases: the old grant is revoked, the new added.
			name: "add permissions different db",
			args: args{
				wants: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "new",
						Allowed: []string{"READ"},
					},
				},
				haves: chronograf.Permissions{
					chronograf.Permission{
						Scope:   "database",
						Name:    "old",
						Allowed: []string{"READ", "WRITE"},
					},
				},
			},
			wantRevoke: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "old",
					Allowed: []string{"READ", "WRITE"},
				},
			},
			wantAdd: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "new",
					Allowed: []string{"READ"},
				},
			},
		},
	}
	for _, tt := range tests {
		gotRevoke, gotAdd := Difference(tt.args.wants, tt.args.haves)
		if !reflect.DeepEqual(gotRevoke, tt.wantRevoke) {
			t.Errorf("%q. Difference() gotRevoke = %v, want %v", tt.name, gotRevoke, tt.wantRevoke)
		}
		if !reflect.DeepEqual(gotAdd, tt.wantAdd) {
			t.Errorf("%q. Difference() gotAdd = %v, want %v", tt.name, gotAdd, tt.wantAdd)
		}
	}
}
// TestToPriv checks the reduction of allowance sets to a single InfluxQL
// privilege keyword, including the unknown-allowance ("BAD") case which
// must map to NO PRIVILEGES.
func TestToPriv(t *testing.T) {
	t.Parallel()
	type args struct {
		a chronograf.Allowances
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "no privs",
			args: args{
				a: chronograf.Allowances{},
			},
			want: NoPrivileges,
		},
		{
			name: "read and write privs",
			args: args{
				a: chronograf.Allowances{"READ", "WRITE"},
			},
			want: All,
		},
		{
			name: "write privs",
			args: args{
				a: chronograf.Allowances{"WRITE"},
			},
			want: Write,
		},
		{
			name: "read privs",
			args: args{
				a: chronograf.Allowances{"READ"},
			},
			want: Read,
		},
		{
			name: "all privs",
			args: args{
				a: chronograf.Allowances{"ALL"},
			},
			want: All,
		},
		{
			// Unknown allowances are ignored, leaving no privileges.
			name: "bad privs",
			args: args{
				a: chronograf.Allowances{"BAD"},
			},
			want: NoPrivileges,
		},
	}
	for _, tt := range tests {
		if got := ToPriv(tt.args.a); got != tt.want {
			t.Errorf("%q. ToPriv() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// TestToGrant checks GRANT statement generation for the all-databases scope,
// a single database, and an unrecognized allowance (which yields no statement).
func TestToGrant(t *testing.T) {
	t.Parallel()
	type args struct {
		username string
		perm     chronograf.Permission
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "grant all for all dbs",
			args: args{
				username: "biff",
				perm: chronograf.Permission{
					Scope:   chronograf.AllScope,
					Allowed: chronograf.Allowances{"ALL"},
				},
			},
			want: `GRANT ALL PRIVILEGES TO "biff"`,
		},
		{
			name: "grant all for one db",
			args: args{
				username: "biff",
				perm: chronograf.Permission{
					Scope:   chronograf.DBScope,
					Name:    "gray_sports_almanac",
					Allowed: chronograf.Allowances{"ALL"},
				},
			},
			want: `GRANT ALL ON "gray_sports_almanac" TO "biff"`,
		},
		{
			// An allowance that ToPriv cannot map produces no GRANT at all.
			name: "bad allowance",
			args: args{
				username: "biff",
				perm: chronograf.Permission{
					Scope:   chronograf.DBScope,
					Name:    "gray_sports_almanac",
					Allowed: chronograf.Allowances{"bad"},
				},
			},
			want: "",
		},
	}
	for _, tt := range tests {
		if got := ToGrant(tt.args.username, tt.args.perm); got != tt.want {
			t.Errorf("%q. ToGrant() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// TestToRevoke checks REVOKE statement generation for the all-databases
// scope and for a single database with an empty allowance list.
func TestToRevoke(t *testing.T) {
	t.Parallel()
	type args struct {
		username string
		perm     chronograf.Permission
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "revoke all for all dbs",
			args: args{
				username: "biff",
				perm: chronograf.Permission{
					Scope:   chronograf.AllScope,
					Allowed: chronograf.Allowances{"ALL"},
				},
			},
			want: `REVOKE ALL PRIVILEGES FROM "biff"`,
		},
		{
			// Empty allowances revoke everything on the named database.
			name: "revoke all for one db",
			args: args{
				username: "biff",
				perm: chronograf.Permission{
					Scope:   chronograf.DBScope,
					Name:    "pleasure_paradice",
					Allowed: chronograf.Allowances{},
				},
			},
			want: `REVOKE ALL PRIVILEGES ON "pleasure_paradice" FROM "biff"`,
		},
	}
	for _, tt := range tests {
		if got := ToRevoke(tt.args.username, tt.args.perm); got != tt.want {
			t.Errorf("%q. ToRevoke() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// Test_showResults_Users checks decoding of SHOW USERS output into
// chronograf users: admins get the ALL allowance, non-admins get none.
func Test_showResults_Users(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name   string
		octets []byte
		want   []chronograf.User
	}{
		{
			name:   "admin and non-admin",
			octets: []byte(`[{"series":[{"columns":["user","admin"],"values":[["admin",true],["reader",false]]}]}]`),
			want: []chronograf.User{
				{
					Name: "admin",
					Permissions: chronograf.Permissions{
						{
							Scope:   chronograf.AllScope,
							Allowed: chronograf.Allowances{"ALL"},
						},
					},
				},
				{
					Name:        "reader",
					Permissions: chronograf.Permissions{},
				},
			},
		},
		{
			// Malformed rows are skipped, producing an empty user list.
			name:   "bad JSON",
			octets: []byte(`[{"series":[{"columns":["user","admin"],"values":[[1,true],["reader","false"]]}]}]`),
			want:   []chronograf.User{},
		},
	}
	for _, tt := range tests {
		r := &showResults{}
		// Unmarshal error deliberately ignored: the "bad JSON" case relies
		// on Users() tolerating a partially-decoded result.
		json.Unmarshal(tt.octets, r)
		if got := r.Users(); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. showResults.Users() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// Test_showResults_Permissions checks decoding of SHOW GRANTS output into
// chronograf permissions, including the "ALL PRIVILEGES"/"ALL" expansions
// and tolerance of malformed rows.
//
// Fixes: the failure message previously said "showResults.Users()" (copied
// from the Users test) and two cases shared the name "other all for one db".
func Test_showResults_Permissions(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name   string
		octets []byte
		want   chronograf.Permissions
	}{
		{
			name:   "write for one db",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","WRITE"]]}]}]`),
			want: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{"WRITE"},
				},
			},
		},
		{
			name:   "all for one db",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","ALL PRIVILEGES"]]}]}]`),
			want: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{"WRITE", "READ"},
				},
			},
		},
		{
			name:   "read for one db",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","READ"]]}]}]`),
			want: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{"READ"},
				},
			},
		},
		{
			name:   "other all for one db",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","ALL"]]}]}]`),
			want: chronograf.Permissions{
				chronograf.Permission{
					Scope:   "database",
					Name:    "tensorflowdb",
					Allowed: []string{"WRITE", "READ"},
				},
			},
		},
		{
			name:   "no privileges for one db",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb","NO PRIVILEGES"]]}]}]`),
			want:   chronograf.Permissions{},
		},
		{
			name:   "bad JSON",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[[1,"WRITE"]]}]}]`),
			want:   chronograf.Permissions{},
		},
		{
			name:   "bad JSON",
			octets: []byte(`[{"series":[{"columns":["database","privilege"],"values":[["tensorflowdb",1]]}]}]`),
			want:   chronograf.Permissions{},
		},
	}
	for _, tt := range tests {
		r := &showResults{}
		// Unmarshal error deliberately ignored: the "bad JSON" cases rely
		// on Permissions() tolerating a partially-decoded result.
		json.Unmarshal(tt.octets, r)
		if got := r.Permissions(); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. showResults.Permissions() = %v, want %v", tt.name, got, tt.want)
		}
	}
}

View File

@ -1,491 +0,0 @@
package queries
import (
"encoding/json"
"errors"
"fmt"
"log"
"reflect"
"regexp"
"strings"
"time"
"github.com/influxdata/influxql"
)
// literalJSON is the generic JSON shape used by LiteralJSON to serialize
// InfluxQL literal expressions (boolean, duration, integer, number, ...).
type literalJSON struct {
	Expr string `json:"expr"` // always "literal" (set by LiteralJSON)
	Val  string `json:"val"`  // the literal's string form
	Type string `json:"type"` // literal kind, e.g. "boolean", "duration"
}
// ParseSelect parses q as InfluxQL and returns it wrapped as a
// SelectStatement. It fails when the query does not parse or when the
// parsed statement is anything other than a SELECT.
func ParseSelect(q string) (*SelectStatement, error) {
	stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		return nil, err
	}
	if sel, ok := stmt.(*influxql.SelectStatement); ok {
		return &SelectStatement{sel}, nil
	}
	return nil, fmt.Errorf("error parsing query: not a SELECT statement")
}
// BinaryExpr adapts an influxql binary expression for JSON serialization.
type BinaryExpr struct {
	*influxql.BinaryExpr
}

// MarshalJSON serializes the binary expression, recursively marshaling
// both operands into raw JSON payloads.
//
// Fix: this previously called log.Fatalln on a marshaling error, which
// terminated the entire process and made the error return unreachable.
// Errors are now logged (non-fatally) and returned to the caller.
func (b *BinaryExpr) MarshalJSON() ([]byte, error) {
	octets, err := MarshalJSON(b.BinaryExpr.LHS)
	if err != nil {
		log.Printf("marshaling binary expression LHS: %v", err)
		return nil, err
	}
	lhs := json.RawMessage(octets)
	octets, err = MarshalJSON(b.BinaryExpr.RHS)
	if err != nil {
		log.Printf("marshaling binary expression RHS: %v", err)
		return nil, err
	}
	rhs := json.RawMessage(octets)
	return json.Marshal(struct {
		Expr string           `json:"expr"`
		Op   string           `json:"op"`
		LHS  *json.RawMessage `json:"lhs"`
		RHS  *json.RawMessage `json:"rhs"`
	}{"binary", b.Op.String(), &lhs, &rhs})
}
// Call adapts an influxql function call for JSON serialization.
type Call struct {
	*influxql.Call
}

// MarshalJSON serializes the function call with each argument recursively
// marshaled through the package-level MarshalJSON dispatcher.
func (c *Call) MarshalJSON() ([]byte, error) {
	rawArgs := make([]json.RawMessage, len(c.Args))
	for idx, arg := range c.Args {
		octets, err := MarshalJSON(arg)
		if err != nil {
			return nil, err
		}
		rawArgs[idx] = octets
	}
	payload := struct {
		Expr string            `json:"expr"`
		Name string            `json:"name"`
		Args []json.RawMessage `json:"args,omitempty"`
	}{Expr: "call", Name: c.Name, Args: rawArgs}
	return json.Marshal(payload)
}
// Distinct adapts an influxql DISTINCT expression for JSON serialization.
type Distinct struct {
	*influxql.Distinct
}

// MarshalJSON serializes the distinct expression as
// {"expr":"distinct","val":<field>}.
//
// Fix: the value was previously interpolated into a hand-built JSON string
// via fmt.Sprintf, producing invalid JSON when Val contained quotes or
// backslashes; encoding/json now escapes it correctly.
func (d *Distinct) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Expr string `json:"expr"`
		Val  string `json:"val"`
	}{"distinct", d.Val})
}
// Fill pairs an influxql fill option with its optional numeric value
// for JSON serialization.
type Fill struct {
	Option influxql.FillOption
	Value  interface{}
}

// MarshalJSON renders the fill option as the JSON string chronograf
// expects: "null", "none", "previous", "linear", or the numeric fill value.
func (f *Fill) MarshalJSON() ([]byte, error) {
	names := map[influxql.FillOption]string{
		influxql.NullFill:     "null",
		influxql.NoFill:       "none",
		influxql.PreviousFill: "previous",
		influxql.LinearFill:   "linear",
	}
	fill, ok := names[f.Option]
	if !ok && f.Option == influxql.NumberFill {
		fill = fmt.Sprintf("%v", f.Value)
	}
	return json.Marshal(fill)
}
// ParenExpr adapts a parenthesized influxql expression for JSON serialization.
type ParenExpr struct {
	*influxql.ParenExpr
}

// MarshalJSON serializes the wrapped expression as
// {"expr": "paren", "val": <inner>}.
//
// Fix: this previously called log.Fatalln on a marshaling error, which
// terminated the entire process and made the error return unreachable.
// Errors are now logged (non-fatally) and returned to the caller.
func (p *ParenExpr) MarshalJSON() ([]byte, error) {
	expr, err := MarshalJSON(p.Expr)
	if err != nil {
		log.Printf("marshaling paren expression: %v", err)
		return nil, err
	}
	return []byte(fmt.Sprintf(`{"expr": "paren", "val": %s}`, expr)), nil
}
// LiteralJSON encodes a literal's string form and kind into the shared
// {"expr":"literal","val":...,"type":...} JSON shape used by all the
// literal wrapper types in this package.
func LiteralJSON(lit string, litType string) ([]byte, error) {
	return json.Marshal(struct {
		Expr string `json:"expr"`
		Val  string `json:"val"`
		Type string `json:"type"`
	}{Expr: "literal", Val: lit, Type: litType})
}
// BooleanLiteral adapts influxql boolean literals for JSON serialization.
type BooleanLiteral struct {
	*influxql.BooleanLiteral
}

// MarshalJSON encodes the literal's string form via the shared literal shape.
func (b *BooleanLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(b.String(), "boolean")
}
// DurationLiteral adapts influxql duration literals for JSON serialization.
type DurationLiteral struct {
	*influxql.DurationLiteral
}

// MarshalJSON encodes the literal's string form via the shared literal shape.
func (d *DurationLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(d.String(), "duration")
}
// IntegerLiteral adapts influxql integer literals for JSON serialization.
type IntegerLiteral struct {
	*influxql.IntegerLiteral
}

// MarshalJSON encodes the literal's string form via the shared literal shape.
func (i *IntegerLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(i.String(), "integer")
}
// NumberLiteral adapts influxql number (float) literals for JSON serialization.
type NumberLiteral struct {
	*influxql.NumberLiteral
}

// MarshalJSON encodes the literal's string form via the shared literal shape.
func (n *NumberLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(n.String(), "number")
}
// RegexLiteral adapts influxql regex literals for JSON serialization.
type RegexLiteral struct {
	*influxql.RegexLiteral
}

// MarshalJSON encodes the literal's string form via the shared literal shape.
func (r *RegexLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(r.String(), "regex")
}
// ListLiteral adapts influxql list literals for JSON serialization.
// TODO: I don't think list is right
type ListLiteral struct {
	*influxql.ListLiteral
}

// MarshalJSON renders the list as a JSON array of strings.
//
// Fix: values were previously wrapped in quotes by hand, producing invalid
// JSON when a value contained quotes or backslashes; encoding/json now
// escapes each element. A nil Vals slice is guarded so the output stays
// "[]" (json.Marshal of a nil slice would emit "null").
func (l *ListLiteral) MarshalJSON() ([]byte, error) {
	if len(l.Vals) == 0 {
		return []byte("[]"), nil
	}
	return json.Marshal(l.Vals)
}
// StringLiteral adapts influxql string literals for JSON serialization.
type StringLiteral struct {
	*influxql.StringLiteral
}

// MarshalJSON encodes the literal via the shared literal shape. It passes
// the raw s.Val (not s.String()) so the value is not wrapped in InfluxQL
// quoting before JSON encoding.
func (s *StringLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(s.Val, "string")
}
// TimeLiteral adapts influxql time literals for JSON serialization.
type TimeLiteral struct {
	*influxql.TimeLiteral
}

// MarshalJSON encodes the timestamp normalized to UTC in RFC3339Nano form
// via the shared literal shape.
func (t *TimeLiteral) MarshalJSON() ([]byte, error) {
	return LiteralJSON(t.Val.UTC().Format(time.RFC3339Nano), "time")
}
// VarRef adapts an influxql variable reference for JSON serialization.
type VarRef struct {
	*influxql.VarRef
}

// MarshalJSON serializes the reference as {"expr":"reference","val":...},
// including a "type" field only when the reference carries a known type.
//
// Fix: the JSON was previously hand-built with fmt.Sprintf, producing
// invalid JSON when Val contained quotes or backslashes; encoding/json
// now escapes the value, and omitempty preserves the conditional "type".
func (v *VarRef) MarshalJSON() ([]byte, error) {
	ref := struct {
		Expr string `json:"expr"`
		Val  string `json:"val"`
		Type string `json:"type,omitempty"`
	}{Expr: "reference", Val: v.Val}
	if v.Type != influxql.Unknown {
		ref.Type = v.Type.String()
	}
	return json.Marshal(ref)
}
// Wildcard adapts an influxql wildcard expression for JSON serialization.
type Wildcard struct {
	*influxql.Wildcard
}

// MarshalJSON renders the wildcard using its InfluxQL string form as
// {"expr": "wildcard", "val": "<form>"}.
func (w *Wildcard) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`{"expr": "wildcard", "val": "%s"}`, w.String())), nil
}
// MarshalJSON dispatches an influxql expression to the wrapper type in this
// package that knows how to serialize it, returning an error for any
// expression kind without a wrapper.
func MarshalJSON(v interface{}) ([]byte, error) {
	switch v := v.(type) {
	case *influxql.BinaryExpr:
		return json.Marshal(&BinaryExpr{v})
	case *influxql.BooleanLiteral:
		return json.Marshal(&BooleanLiteral{v})
	case *influxql.Call:
		return json.Marshal(&Call{v})
	case *influxql.Distinct:
		return json.Marshal(&Distinct{v})
	case *influxql.DurationLiteral:
		return json.Marshal(&DurationLiteral{v})
	case *influxql.IntegerLiteral:
		return json.Marshal(&IntegerLiteral{v})
	case *influxql.NumberLiteral:
		return json.Marshal(&NumberLiteral{v})
	case *influxql.ParenExpr:
		return json.Marshal(&ParenExpr{v})
	case *influxql.RegexLiteral:
		return json.Marshal(&RegexLiteral{v})
	case *influxql.ListLiteral:
		return json.Marshal(&ListLiteral{v})
	case *influxql.StringLiteral:
		return json.Marshal(&StringLiteral{v})
	case *influxql.TimeLiteral:
		return json.Marshal(&TimeLiteral{v})
	case *influxql.VarRef:
		return json.Marshal(&VarRef{v})
	case *influxql.Wildcard:
		return json.Marshal(&Wildcard{v})
	default:
		t := reflect.TypeOf(v)
		return nil, fmt.Errorf("error marshaling query: unknown type %s", t)
	}
}
// Measurement is the JSON shape for an InfluxQL measurement source,
// produced by Source.MarshalJSON.
type Measurement struct {
	Database        string         `json:"database"`
	RetentionPolicy string         `json:"retentionPolicy"`
	Name            string         `json:"name,omitempty"`  // literal measurement name, if any
	Regex           *regexp.Regexp `json:"regex,omitempty"` // regex measurement matcher, if any
	Type            string         `json:"type"`            // always "measurement"
}
// Source adapts an influxql data source for JSON serialization.
type Source struct {
	influxql.Source
}

// MarshalJSON serializes the source as a Measurement object. Only plain
// measurements are supported; subquery sources produce an error.
func (s *Source) MarshalJSON() ([]byte, error) {
	src, ok := s.Source.(*influxql.Measurement)
	if !ok {
		return nil, fmt.Errorf("error marshaling source. Subqueries not supported yet")
	}
	m := Measurement{
		Database:        src.Database,
		RetentionPolicy: src.RetentionPolicy,
		Name:            src.Name,
		Type:            "measurement",
	}
	if src.Regex != nil {
		m.Regex = src.Regex.Val
	}
	return json.Marshal(m)
}
// Sources adapts a list of influxql data sources for JSON serialization.
type Sources struct {
	influxql.Sources
}

// MarshalJSON serializes every source in the list as a JSON array.
// TODO: Handle subqueries
func (s *Sources) MarshalJSON() ([]byte, error) {
	wrapped := make([]Source, 0, len(s.Sources))
	for _, src := range s.Sources {
		wrapped = append(wrapped, Source{src})
	}
	return json.Marshal(wrapped)
}
// Field adapts a projected influxql field for JSON serialization.
type Field struct {
	*influxql.Field
}

// MarshalJSON serializes the field's expression as "column" together with
// its optional alias.
func (f *Field) MarshalJSON() ([]byte, error) {
	octets, err := MarshalJSON(f.Expr)
	if err != nil {
		return nil, err
	}
	column := json.RawMessage(octets)
	payload := struct {
		Alias  string           `json:"alias,omitempty"`
		Column *json.RawMessage `json:"column"`
	}{Alias: f.Alias, Column: &column}
	return json.Marshal(payload)
}
// Fields adapts a list of projected influxql fields for JSON serialization.
type Fields struct {
	influxql.Fields
}

// MarshalJSON serializes every field in the list as a JSON array.
func (f *Fields) MarshalJSON() ([]byte, error) {
	wrapped := make([]Field, 0, len(f.Fields))
	for _, field := range f.Fields {
		wrapped = append(wrapped, Field{field})
	}
	return json.Marshal(wrapped)
}
// Condition adapts a WHERE-clause expression for JSON serialization.
type Condition struct {
	influxql.Expr
}

// MarshalJSON serializes the condition via the package-level dispatcher.
func (c *Condition) MarshalJSON() ([]byte, error) {
	return MarshalJSON(c.Expr)
}
// SortField adapts an influxql ORDER BY field for JSON serialization.
type SortField struct {
	*influxql.SortField
}

// MarshalJSON serializes the sort field's name and its direction as
// "ascending" or "descending".
func (s *SortField) MarshalJSON() ([]byte, error) {
	order := "descending"
	if s.Ascending {
		order = "ascending"
	}
	return json.Marshal(struct {
		Name  string `json:"name,omitempty"`
		Order string `json:"order,omitempty"`
	}{s.Name, order})
}
// SortFields adapts a list of influxql ORDER BY fields for JSON serialization.
type SortFields struct {
	influxql.SortFields
}

// MarshalJSON serializes every sort field in the list as a JSON array.
func (f *SortFields) MarshalJSON() ([]byte, error) {
	wrapped := make([]SortField, 0, len(f.SortFields))
	for _, field := range f.SortFields {
		wrapped = append(wrapped, SortField{field})
	}
	return json.Marshal(wrapped)
}
// Limits is the JSON shape for a statement's LIMIT/OFFSET/SLIMIT/SOFFSET
// clauses; zero values are omitted.
type Limits struct {
	Limit   int `json:"limit,omitempty"`
	Offset  int `json:"offset,omitempty"`
	SLimit  int `json:"slimit,omitempty"`
	SOffset int `json:"soffset,omitempty"`
}
// SelectStatement adapts an influxql SELECT statement for JSON serialization.
type SelectStatement struct {
	*influxql.SelectStatement
}

// MarshalJSON serializes the statement into an object with "fields" and
// "sources" always present; groupBy, condition, limits, and orderbys are
// included only when the corresponding clause exists.
func (s *SelectStatement) MarshalJSON() ([]byte, error) {
	stmt := map[string]interface{}{
		"fields":  &Fields{s.Fields},
		"sources": &Sources{s.Sources},
	}
	if s.Condition != nil {
		stmt["condition"] = &Condition{s.Condition}
	}
	if len(s.Dimensions) > 0 {
		stmt["groupBy"] = &Dimensions{s.Dimensions, s.Fill, s.FillValue}
	}
	if len(s.SortFields) > 0 {
		stmt["orderbys"] = &SortFields{s.SortFields}
	}
	hasLimits := s.Limit != 0 || s.Offset != 0 || s.SLimit != 0 || s.SOffset != 0
	if hasLimits {
		stmt["limits"] = &Limits{s.Limit, s.Offset, s.SLimit, s.SOffset}
	}
	return json.Marshal(stmt)
}
// Dimension adapts a single GROUP BY dimension for JSON serialization.
type Dimension struct {
	*influxql.Dimension
}

// MarshalJSON serializes a GROUP BY dimension: time(...) calls become an
// {interval, offset} object, tag references, wildcards, and regexes are
// rendered as plain strings, and anything else falls through to the
// package-level dispatcher.
//
// Fix: the non-time() call check previously reported "time dimension offset
// function must be now()", copy-pasted from the offset validation below;
// the stale "exactly one argument" comment also contradicted the 1-or-2
// argument check.
func (d *Dimension) MarshalJSON() ([]byte, error) {
	switch v := d.Expr.(type) {
	case *influxql.Call:
		if v.Name != "time" {
			return nil, errors.New("time dimension must be a time() call")
		}
		// Make sure there are one or two arguments (interval, optional offset).
		if got := len(v.Args); got < 1 || got > 2 {
			return nil, errors.New("time dimension expected 1 or 2 arguments")
		}
		// Ensure the first argument (the interval) is a duration.
		lit, ok := v.Args[0].(*influxql.DurationLiteral)
		if !ok {
			return nil, errors.New("time dimension must have duration argument")
		}
		var offset string
		if len(v.Args) == 2 {
			// The optional second argument is either a duration or now().
			switch o := v.Args[1].(type) {
			case *influxql.DurationLiteral:
				offset = o.String()
			case *influxql.Call:
				if o.Name != "now" {
					return nil, errors.New("time dimension offset function must be now()")
				} else if len(o.Args) != 0 {
					return nil, errors.New("time dimension offset now() function requires no arguments")
				}
				offset = "now()"
			default:
				return nil, errors.New("time dimension offset must be duration or now()")
			}
		}
		return json.Marshal(struct {
			Interval string `json:"interval"`
			Offset   string `json:"offset,omitempty"`
		}{lit.String(), offset})
	case *influxql.VarRef:
		return json.Marshal(v.Val)
	case *influxql.Wildcard:
		return json.Marshal(v.String())
	case *influxql.RegexLiteral:
		return json.Marshal(v.String())
	}
	return MarshalJSON(d.Expr)
}
// Dimensions adapts a statement's full GROUP BY clause (dimensions plus
// fill option) for JSON serialization.
type Dimensions struct {
	influxql.Dimensions
	FillOption influxql.FillOption
	FillValue  interface{}
}

// MarshalJSON serializes the GROUP BY clause as {time, tags, fill}.
// Call dimensions (i.e. time(...)) populate "time"; everything else is
// appended to "tags". Fill is included only when it differs from the
// default NullFill.
func (d *Dimensions) MarshalJSON() ([]byte, error) {
	groupBys := struct {
		Time *json.RawMessage   `json:"time,omitempty"`
		Tags []*json.RawMessage `json:"tags,omitempty"`
		Fill *json.RawMessage   `json:"fill,omitempty"`
	}{}
	for _, dim := range d.Dimensions {
		switch dim.Expr.(type) {
		case *influxql.Call:
			octets, err := json.Marshal(&Dimension{dim})
			if err != nil {
				return nil, err
			}
			// NOTE(review): if several time() dimensions appear, each one
			// overwrites the previous — only the last is kept.
			time := json.RawMessage(octets)
			groupBys.Time = &time
		default:
			octets, err := json.Marshal(&Dimension{dim})
			if err != nil {
				return nil, err
			}
			tag := json.RawMessage(octets)
			groupBys.Tags = append(groupBys.Tags, &tag)
		}
	}
	if d.FillOption != influxql.NullFill {
		octets, err := json.Marshal(&Fill{d.FillOption, d.FillValue})
		if err != nil {
			return nil, err
		}
		fill := json.RawMessage(octets)
		groupBys.Fill = &fill
	}
	return json.Marshal(groupBys)
}

View File

@ -1,33 +0,0 @@
package queries
import (
"encoding/json"
"fmt"
"testing"
"time"
)
// TestSelect is a smoke test: it verifies that a variety of SELECT
// statements both parse via ParseSelect and marshal to JSON without error.
// It does not assert on the marshaled output itself.
func TestSelect(t *testing.T) {
	tests := []struct {
		q string
	}{
		{q: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3::field) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, time.Now().UTC().Format(time.RFC3339Nano))},
		{q: fmt.Sprintf(`SELECT difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, time.Now().UTC().Format(time.RFC3339Nano))},
		{q: `SELECT derivative(field1, 1h) / derivative(field2, 1h) FROM myseries`},
		{q: `SELECT mean("load1") FROM "system" WHERE "cluster_id" =~ /^$ClusterID$/ AND time > now() - 1h GROUP BY time(10m), "host" fill(null)`},
		{q: "SELECT max(\"n_cpus\") AS \"max_cpus\", non_negative_derivative(median(\"n_users\"), 5m) FROM \"system\" WHERE \"cluster_id\" =~ /^23/ AND \"host\" = 'prod-2ccccc04-us-east-1-data-3' AND time > now() - 15m GROUP BY time(15m, 10s),host,tag_x fill(10)"},
		{q: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"default\".\"cpu\" WHERE host =~ /\\./ AND time > now() - 1h"},
		{q: `SELECT 1 + "A" FROM howdy`},
	}
	for i, tt := range tests {
		stmt, err := ParseSelect(tt.q)
		if err != nil {
			t.Errorf("Test %d query %s invalid statement: %v", i, tt.q, err)
		}
		_, err = json.MarshalIndent(stmt, "", " ")
		if err != nil {
			t.Errorf("Test %d query %s Unable to marshal statement: %v", i, tt.q, err)
		}
	}
}

View File

@ -1,537 +0,0 @@
package influx
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxql"
)
// TimeRangeAsEpochNano extracts the min and max epoch-nanosecond bounds of
// the time condition in expr. An unset lower bound falls back to InfluxQL's
// minimum representable time; an unset upper bound falls back to now.
func TimeRangeAsEpochNano(expr influxql.Expr, now time.Time) (min, max int64, err error) {
	// TODO(desa): is this OK?
	_, trange, err := influxql.ConditionExpr(expr, nil)
	if err != nil {
		return 0, 0, err
	}
	min = trange.Min.UnixNano()
	if trange.Min.IsZero() {
		min = time.Unix(0, influxql.MinTime).UnixNano()
	}
	max = now.UnixNano()
	if !trange.Max.IsZero() {
		max = trange.Max.UnixNano()
	}
	return min, max, nil
}
// WhereToken is used to parse the time expression from an influxql query:
// ParseTime scans (case-insensitively) for this keyword and parses
// everything after it as the condition expression.
const WhereToken = "WHERE"
// ParseTime extracts the duration of the time range of the query: it
// locates the WHERE clause, reduces now()-relative expressions against the
// supplied now, and returns max-min (never negative). Queries without a
// WHERE clause are rejected.
func ParseTime(influxQL string, now time.Time) (time.Duration, error) {
	idx := strings.Index(strings.ToUpper(influxQL), WhereToken)
	if idx == -1 {
		return 0, fmt.Errorf("not a relative duration")
	}
	where := influxQL[idx+len(WhereToken):]
	cond, err := influxql.ParseExpr(where)
	if err != nil {
		return 0, err
	}
	cond = influxql.Reduce(cond, &influxql.NowValuer{Now: now})
	min, max, err := TimeRangeAsEpochNano(cond, now)
	if err != nil {
		return 0, err
	}
	// Clamp inverted ranges to zero rather than returning a negative duration.
	if max <= min {
		return 0, nil
	}
	return time.Duration(max - min), nil
}
// Convert changes an InfluxQL query to a QueryConfig. When the query uses
// any feature QueryConfig cannot express (limits, sorting, SELECT INTO,
// multiple or regex sources, unsupported functions, or complex conditions),
// it falls back to returning a config carrying only the raw query text.
func Convert(influxQL string) (chronograf.QueryConfig, error) {
	itsDashboardTime := false
	intervalTime := false
	// Substitute the chronograf template variables with parseable
	// placeholders so influxql.ParseQuery accepts the query; the
	// substitutions are reversed again below. "8675309ns" is an
	// improbable sentinel duration chosen to be recognizable.
	if strings.Contains(influxQL, ":interval:") {
		influxQL = strings.Replace(influxQL, ":interval:", "8675309ns", -1)
		intervalTime = true
	}
	if strings.Contains(influxQL, ":dashboardTime:") {
		influxQL = strings.Replace(influxQL, ":dashboardTime:", "now() - 15m", 1)
		itsDashboardTime = true
	}
	query, err := influxql.ParseQuery(influxQL)
	if err != nil {
		return chronograf.QueryConfig{}, err
	}
	// Restore the original template variables in the text we hand back.
	if itsDashboardTime {
		influxQL = strings.Replace(influxQL, "now() - 15m", ":dashboardTime:", 1)
	}
	if intervalTime {
		influxQL = strings.Replace(influxQL, "8675309ns", ":interval:", -1)
	}
	// raw is the fallback result: the query parsed but cannot be expressed
	// as a structured QueryConfig, so only its RawText is populated.
	raw := chronograf.QueryConfig{
		RawText: &influxQL,
		Fields:  []chronograf.Field{},
		GroupBy: chronograf.GroupBy{
			Tags: []string{},
		},
		Tags: make(map[string][]string),
	}
	qc := chronograf.QueryConfig{
		GroupBy: chronograf.GroupBy{
			Tags: []string{},
		},
		Tags: make(map[string][]string),
	}
	if len(query.Statements) != 1 {
		return raw, nil
	}
	stmt, ok := query.Statements[0].(*influxql.SelectStatement)
	if !ok {
		return raw, nil
	}
	// Query config doesn't support limits
	if stmt.Limit != 0 || stmt.Offset != 0 || stmt.SLimit != 0 || stmt.SOffset != 0 {
		return raw, nil
	}
	// Query config doesn't support sorting
	if len(stmt.SortFields) > 0 {
		return raw, nil
	}
	// Query config doesn't allow SELECT INTO
	if stmt.Target != nil {
		return raw, nil
	}
	// Query config only allows selecting from one source at a time.
	if len(stmt.Sources) != 1 {
		return raw, nil
	}
	src := stmt.Sources[0]
	measurement, ok := src.(*influxql.Measurement)
	if !ok {
		return raw, nil
	}
	if measurement.Regex != nil {
		return raw, nil
	}
	qc.Database = measurement.Database
	qc.RetentionPolicy = measurement.RetentionPolicy
	qc.Measurement = measurement.Name
	for _, dim := range stmt.Dimensions {
		switch v := dim.Expr.(type) {
		default:
			return raw, nil
		case *influxql.Call:
			// Only time(<duration>) group-bys are representable.
			if v.Name != "time" {
				return raw, nil
			}
			// Make sure there is exactly one argument.
			if len(v.Args) != 1 {
				return raw, nil
			}
			// Ensure the argument is a duration.
			lit, ok := v.Args[0].(*influxql.DurationLiteral)
			if !ok {
				return raw, nil
			}
			// A sentinel interval means the user wrote ":interval:".
			if intervalTime {
				qc.GroupBy.Time = "auto"
			} else {
				qc.GroupBy.Time = lit.String()
			}
			// Add fill to queryConfig only if there's a `GROUP BY time`
			switch stmt.Fill {
			case influxql.NullFill:
				qc.Fill = "null"
			case influxql.NoFill:
				qc.Fill = "none"
			case influxql.NumberFill:
				qc.Fill = fmt.Sprint(stmt.FillValue)
			case influxql.PreviousFill:
				qc.Fill = "previous"
			case influxql.LinearFill:
				qc.Fill = "linear"
			default:
				return raw, nil
			}
		case *influxql.VarRef:
			qc.GroupBy.Tags = append(qc.GroupBy.Tags, v.Val)
		}
	}
	qc.Fields = []chronograf.Field{}
	for _, fld := range stmt.Fields {
		switch f := fld.Expr.(type) {
		default:
			return raw, nil
		case *influxql.Call:
			// only support certain query config functions
			if _, ok = supportedFuncs[f.Name]; !ok {
				return raw, nil
			}
			fldArgs := []chronograf.Field{}
			for _, arg := range f.Args {
				switch ref := arg.(type) {
				case *influxql.VarRef:
					fldArgs = append(fldArgs, chronograf.Field{
						Value: ref.Val,
						Type:  "field",
					})
				case *influxql.IntegerLiteral:
					fldArgs = append(fldArgs, chronograf.Field{
						Value: strconv.FormatInt(ref.Val, 10),
						Type:  "integer",
					})
				case *influxql.NumberLiteral:
					fldArgs = append(fldArgs, chronograf.Field{
						Value: strconv.FormatFloat(ref.Val, 'f', -1, 64),
						Type:  "number",
					})
				case *influxql.RegexLiteral:
					fldArgs = append(fldArgs, chronograf.Field{
						Value: ref.Val.String(),
						Type:  "regex",
					})
				case *influxql.Wildcard:
					fldArgs = append(fldArgs, chronograf.Field{
						Value: "*",
						Type:  "wildcard",
					})
				default:
					return raw, nil
				}
			}
			qc.Fields = append(qc.Fields, chronograf.Field{
				Value: f.Name,
				Type:  "func",
				Alias: fld.Alias,
				Args:  fldArgs,
			})
		case *influxql.VarRef:
			if f.Type != influxql.Unknown {
				return raw, nil
			}
			qc.Fields = append(qc.Fields, chronograf.Field{
				Value: f.Val,
				Type:  "field",
				Alias: fld.Alias,
			})
		}
	}
	if stmt.Condition == nil {
		return qc, nil
	}
	// The condition must reduce to a supported combination of tag filters
	// (see isTagLogic); anything more complex falls back to raw text.
	reduced := influxql.Reduce(stmt.Condition, nil)
	logic, ok := isTagLogic(reduced)
	if !ok {
		return raw, nil
	}
	ops := map[string]bool{}
	for _, l := range logic {
		values, ok := qc.Tags[l.Tag]
		if !ok {
			values = []string{}
		}
		ops[l.Op] = true
		values = append(values, l.Value)
		qc.Tags[l.Tag] = values
	}
	if len(logic) > 0 {
		// All filters must use the same operator; "==" means the tags are
		// accepted, "!=" means they are rejected.
		if len(ops) != 1 {
			return raw, nil
		}
		if _, ok := ops["=="]; ok {
			qc.AreTagsAccepted = true
		}
	}
	// If the condition has a time range we report back its duration
	if dur, ok := hasTimeRange(stmt.Condition); ok {
		// Dashboard-time queries get their range from the dashboard, not
		// from the query text.
		if !itsDashboardTime {
			qc.Range = &chronograf.DurationRange{
				Lower: "now() - " + shortDur(dur),
			}
		}
	}
	return qc, nil
}
// tagFilter represents a single tag that is filtered by some condition,
// e.g. `"host" == 'server01'` becomes {Op: "==", Tag: "host", Value: "server01"}.
type tagFilter struct {
	Op    string // comparison operator: "==" or "!="
	Tag   string // tag key
	Value string // tag value being compared against
}
// isTime reports whether the expression is a (possibly parenthesized)
// reference to the "time" column, matched case-insensitively.
func isTime(exp influxql.Expr) bool {
	switch e := exp.(type) {
	case *influxql.ParenExpr:
		return isTime(e.Expr)
	case *influxql.VarRef:
		return strings.ToLower(e.Val) == "time"
	}
	return false
}
// isNow reports whether the expression is a (possibly parenthesized)
// call to now() with no arguments, matched case-insensitively.
func isNow(exp influxql.Expr) bool {
	switch e := exp.(type) {
	case *influxql.ParenExpr:
		return isNow(e.Expr)
	case *influxql.Call:
		return strings.ToLower(e.Name) == "now" && len(e.Args) == 0
	}
	return false
}
// isDuration reports whether the expression is a (possibly parenthesized)
// duration literal, returning its value when it is.
//
// Fix: the explicit NumberLiteral/IntegerLiteral/TimeLiteral case duplicated
// the default `return 0, false` and has been removed.
func isDuration(exp influxql.Expr) (time.Duration, bool) {
	switch e := exp.(type) {
	case *influxql.ParenExpr:
		return isDuration(e.Expr)
	case *influxql.DurationLiteral:
		return e.Val, true
	}
	return 0, false
}
// isPreviousTime reports whether the expression is a "now() - <duration>"
// style expression (in either operand order) or a bare now(), returning the
// duration offset (zero for bare now()).
func isPreviousTime(exp influxql.Expr) (time.Duration, bool) {
	if p, ok := exp.(*influxql.ParenExpr); ok {
		return isPreviousTime(p.Expr)
	} else if bin, ok := exp.(*influxql.BinaryExpr); ok {
		now := isNow(bin.LHS) || isNow(bin.RHS) // either side can be now
		op := bin.Op == influxql.SUB
		dur, hasDur := isDuration(bin.LHS)
		if !hasDur {
			dur, hasDur = isDuration(bin.RHS)
		}
		// All three must hold: a now() operand, a subtraction, a duration.
		return dur, now && op && hasDur
	} else if isNow(exp) { // just comparing to now
		return 0, true
	}
	return 0, false
}
// isTimeRange reports whether the expression is a comparison of the "time"
// column against a now()-relative bound (e.g. `time > now() - 1h`),
// returning the relative duration when it is.
func isTimeRange(exp influxql.Expr) (time.Duration, bool) {
	if p, ok := exp.(*influxql.ParenExpr); ok {
		return isTimeRange(p.Expr)
	} else if bin, ok := exp.(*influxql.BinaryExpr); ok {
		tm := isTime(bin.LHS) || isTime(bin.RHS) // Either side could be time
		op := false
		// Only ordering comparisons qualify as a range; == / != do not.
		switch bin.Op {
		case influxql.LT, influxql.LTE, influxql.GT, influxql.GTE:
			op = true
		}
		dur, prev := isPreviousTime(bin.LHS)
		if !prev {
			dur, prev = isPreviousTime(bin.RHS)
		}
		return dur, tm && op && prev
	}
	return 0, false
}
// hasTimeRange walks the expression tree and reports the duration of the
// first time-range comparison found (see isTimeRange).
func hasTimeRange(exp influxql.Expr) (time.Duration, bool) {
	v := &timeRangeVisitor{}
	influxql.Walk(v, exp)
	return v.Duration, v.Ok
}
// timeRangeVisitor implements influxql.Visitor to search for time ranges.
// After a walk, Duration holds the duration of the first range found and
// Ok reports whether one was found at all.
type timeRangeVisitor struct {
	Duration time.Duration
	Ok       bool
}

// Visit records the first time-range expression encountered and stops
// descending (returns nil) once one is found or the node is not an
// expression.
func (v *timeRangeVisitor) Visit(n influxql.Node) influxql.Visitor {
	exp, isExpr := n.(influxql.Expr)
	if !isExpr {
		return nil
	}
	if dur, found := isTimeRange(exp); found {
		v.Duration = dur
		v.Ok = true
		return nil
	}
	return v
}
// isTagLogic reports whether the expression is a combination of tag filters
// (optionally alongside a time range) that QueryConfig can represent,
// returning the flattened filters. Supported shapes: a bare time range, a
// single tag filter, same-tag same-op filters OR'd together, a time range
// AND/OR'd with a tag filter, and recursive AND/OR combinations so long as
// every filter uses the same operator kind.
func isTagLogic(exp influxql.Expr) ([]tagFilter, bool) {
	if p, ok := exp.(*influxql.ParenExpr); ok {
		return isTagLogic(p.Expr)
	}
	// A bare time range contributes no tag filters but is still valid.
	if _, ok := isTimeRange(exp); ok {
		return nil, true
	} else if tf, ok := isTagFilter(exp); ok {
		return []tagFilter{tf}, true
	}
	bin, ok := exp.(*influxql.BinaryExpr)
	if !ok {
		return nil, false
	}
	lhs, lhsOK := isTagFilter(bin.LHS)
	rhs, rhsOK := isTagFilter(bin.RHS)
	// Two filters on the same tag with the same operator may be OR'd.
	if lhsOK && rhsOK && lhs.Tag == rhs.Tag && lhs.Op == rhs.Op && bin.Op == influxql.OR {
		return []tagFilter{lhs, rhs}, true
	}
	if bin.Op != influxql.AND && bin.Op != influxql.OR {
		return nil, false
	}
	// time-range combined with a single tag filter: keep just the filter.
	_, tm := isTimeRange(bin.LHS)
	if !tm {
		_, tm = isTimeRange(bin.RHS)
	}
	tf := lhsOK || rhsOK
	if tm && tf {
		if lhsOK {
			return []tagFilter{lhs}, true
		}
		return []tagFilter{rhs}, true
	}
	// Otherwise both sides must themselves be valid tag logic.
	tlLHS, lhsOK := isTagLogic(bin.LHS)
	tlRHS, rhsOK := isTagLogic(bin.RHS)
	if lhsOK && rhsOK {
		ops := map[string]bool{} // there must only be one kind of ops
		for _, tf := range tlLHS {
			ops[tf.Op] = true
		}
		for _, tf := range tlRHS {
			ops[tf.Op] = true
		}
		if len(ops) > 1 {
			return nil, false
		}
		return append(tlLHS, tlRHS...), true
	}
	return nil, false
}
// isVarRef reports whether the expression is a (possibly parenthesized)
// variable reference.
func isVarRef(exp influxql.Expr) bool {
	switch e := exp.(type) {
	case *influxql.ParenExpr:
		return isVarRef(e.Expr)
	case *influxql.VarRef:
		return true
	}
	return false
}
// isString reports whether the expression is a (possibly parenthesized)
// string literal.
func isString(exp influxql.Expr) bool {
	switch e := exp.(type) {
	case *influxql.ParenExpr:
		return isString(e.Expr)
	case *influxql.StringLiteral:
		return true
	}
	return false
}
// isTagFilter reports whether the expression is a tag comparison of the
// form `tag == 'value'` or `tag != 'value'` (in either operand order),
// returning the extracted filter when it is.
func isTagFilter(exp influxql.Expr) (tagFilter, bool) {
	switch expr := exp.(type) {
	default:
		return tagFilter{}, false
	case *influxql.ParenExpr:
		return isTagFilter(expr.Expr)
	case *influxql.BinaryExpr:
		// Only equality and inequality comparisons are tag filters.
		var Op string
		if expr.Op == influxql.EQ {
			Op = "=="
		} else if expr.Op == influxql.NEQ {
			Op = "!="
		} else {
			return tagFilter{}, false
		}
		// One side must be a string literal, the other a tag reference.
		hasValue := isString(expr.LHS) || isString(expr.RHS)
		hasTag := isVarRef(expr.LHS) || isVarRef(expr.RHS)
		if !(hasValue && hasTag) {
			return tagFilter{}, false
		}
		value := ""
		tag := ""
		// Either tag op value or value op tag
		// NOTE(review): the type assertions below ignore failures; this
		// appears to rely on hasValue/hasTag above ruling out mixed
		// parenthesized operands — confirm before changing.
		if isVarRef(expr.LHS) {
			t, _ := expr.LHS.(*influxql.VarRef)
			tag = t.Val
			v, _ := expr.RHS.(*influxql.StringLiteral)
			value = v.Val
		} else {
			t, _ := expr.RHS.(*influxql.VarRef)
			tag = t.Val
			v, _ := expr.LHS.(*influxql.StringLiteral)
			value = v.Val
		}
		return tagFilter{
			Op:    Op,
			Tag:   tag,
			Value: value,
		}, true
	}
}
// supportedFuncs is the set of InfluxQL aggregate/selector function names
// that Convert can represent in a QueryConfig; any other function forces
// the raw-text fallback.
var supportedFuncs = map[string]bool{
	"mean":       true,
	"median":     true,
	"count":      true,
	"min":        true,
	"max":        true,
	"sum":        true,
	"first":      true,
	"last":       true,
	"spread":     true,
	"stddev":     true,
	"percentile": true,
	"top":        true,
	"bottom":     true,
}
// shortDur converts a duration into the queryConfig duration format by
// trimming the redundant zero components that time.Duration.String()
// produces, e.g. "15m0s" -> "15m" and "1h0m0s" -> "1h".
func shortDur(d time.Duration) string {
	s := d.String()
	if strings.HasSuffix(s, "m0s") {
		s = strings.TrimSuffix(s, "0s")
	}
	if strings.HasSuffix(s, "h0m") {
		s = strings.TrimSuffix(s, "0m")
	}
	return s
}

View File

@ -1,810 +0,0 @@
package influx
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestConvert exercises the translation of raw InfluxQL statements into
// chronograf.QueryConfig values. A non-empty RawText marks statements that
// cannot be fully represented structurally, so Convert is expected to fall
// back to echoing the raw statement; wantErr marks statements that must
// fail outright (e.g. an invalid FILL argument).
func TestConvert(t *testing.T) {
tests := []struct {
name string
influxQL string
RawText string
want chronograf.QueryConfig
wantErr bool
}{
{
name: "Test field order",
influxQL: `SELECT "usage_idle", "usage_guest_nice", "usage_system", "usage_guest" FROM "telegraf"."autogen"."cpu" WHERE time > :dashboardTime:`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
{
Value: "usage_guest_nice",
Type: "field",
},
{
Value: "usage_system",
Type: "field",
},
{
Value: "usage_guest",
Type: "field",
},
},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test field function order",
influxQL: `SELECT mean("usage_idle"), median("usage_idle"), count("usage_guest_nice"), mean("usage_guest_nice") FROM "telegraf"."autogen"."cpu" WHERE time > :dashboardTime:`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "mean",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
{
Value: "median",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
{
Value: "count",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_guest_nice",
Type: "field",
},
},
},
{
Value: "mean",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_guest_nice",
Type: "field",
},
},
},
},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test named count field",
influxQL: `SELECT moving_average(mean("count"),14) FROM "usage_computed"."autogen".unique_clusters_by_day WHERE time > now() - 90d AND product = 'influxdb' group by time(1d)`,
RawText: `SELECT moving_average(mean("count"),14) FROM "usage_computed"."autogen".unique_clusters_by_day WHERE time > now() - 90d AND product = 'influxdb' group by time(1d)`,
want: chronograf.QueryConfig{
Fields: []chronograf.Field{},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test math",
influxQL: `SELECT count("event_id")/3 as "event_count_id" from discource.autogen.discourse_events where time > now() - 7d group by time(1d), "event_type"`,
RawText: `SELECT count("event_id")/3 as "event_count_id" from discource.autogen.discourse_events where time > now() - 7d group by time(1d), "event_type"`,
want: chronograf.QueryConfig{
Fields: []chronograf.Field{},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test range",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now() - 15m`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
Tags: map[string][]string{"host": {"myhost"}},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: false,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test invalid range",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now() - 15`,
RawText: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now() - 15`,
want: chronograf.QueryConfig{
Fields: []chronograf.Field{},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test range with no duration",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time > now()`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
Tags: map[string][]string{"host": {"myhost"}},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: false,
Range: &chronograf.DurationRange{
Lower: "now() - 0s",
},
},
},
{
name: "Test range with no tags",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where time > now() - 15m`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Tags: map[string][]string{},
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: false,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test range with no tags nor duration",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where time`,
RawText: `SELECT usage_user from telegraf.autogen.cpu where time`,
want: chronograf.QueryConfig{
Fields: []chronograf.Field{},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test with no time range",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time`,
RawText: `SELECT usage_user from telegraf.autogen.cpu where "host" != 'myhost' and time`,
want: chronograf.QueryConfig{
Fields: []chronograf.Field{},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test with no where clauses",
influxQL: `SELECT usage_user from telegraf.autogen.cpu`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
},
},
{
name: "Test tags accepted",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where "host" = 'myhost' and time > now() - 15m`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
Tags: map[string][]string{"host": {"myhost"}},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: true,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
Upper: "",
},
},
},
{
name: "Test multible tags not accepted",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where time > now() - 15m and "host" != 'myhost' and "cpu" != 'cpu-total'`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
Tags: map[string][]string{
"host": {
"myhost",
},
"cpu": {
"cpu-total",
},
},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: false,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
Upper: "",
},
},
},
{
name: "Test mixed tag logic",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where ("host" = 'myhost' or "this" = 'those') and ("howdy" != 'doody') and time > now() - 15m`,
RawText: `SELECT usage_user from telegraf.autogen.cpu where ("host" = 'myhost' or "this" = 'those') and ("howdy" != 'doody') and time > now() - 15m`,
want: chronograf.QueryConfig{
Fields: []chronograf.Field{},
Tags: map[string][]string{},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
},
},
{
name: "Test tags accepted",
influxQL: `SELECT usage_user from telegraf.autogen.cpu where ("host" = 'myhost' OR "host" = 'yourhost') and ("these" = 'those') and time > now() - 15m`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_user",
Type: "field",
},
},
Tags: map[string][]string{
"host": {"myhost", "yourhost"},
"these": {"those"},
},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: true,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Complex Logic with tags not accepted",
influxQL: `SELECT "usage_idle", "usage_guest_nice", "usage_system", "usage_guest" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m AND ("cpu"!='cpu-total' OR "cpu"!='cpu0') AND ("host"!='dev-052978d6-us-east-2-meta-0' OR "host"!='dev-052978d6-us-east-2-data-5' OR "host"!='dev-052978d6-us-east-2-data-4' OR "host"!='dev-052978d6-us-east-2-data-3')`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
{
Value: "usage_guest_nice",
Type: "field",
},
{
Value: "usage_system",
Type: "field",
},
{
Value: "usage_guest",
Type: "field",
},
},
Tags: map[string][]string{
"host": {
"dev-052978d6-us-east-2-meta-0",
"dev-052978d6-us-east-2-data-5",
"dev-052978d6-us-east-2-data-4",
"dev-052978d6-us-east-2-data-3",
},
"cpu": {
"cpu-total",
"cpu0",
},
},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: false,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Complex Logic with tags accepted",
influxQL: `SELECT "usage_idle", "usage_guest_nice", "usage_system", "usage_guest" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m AND ("cpu" = 'cpu-total' OR "cpu" = 'cpu0') AND ("host" = 'dev-052978d6-us-east-2-meta-0' OR "host" = 'dev-052978d6-us-east-2-data-5' OR "host" = 'dev-052978d6-us-east-2-data-4' OR "host" = 'dev-052978d6-us-east-2-data-3')`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
{
Value: "usage_guest_nice",
Type: "field",
},
{
Value: "usage_system",
Type: "field",
},
{
Value: "usage_guest",
Type: "field",
},
},
Tags: map[string][]string{
"host": {
"dev-052978d6-us-east-2-meta-0",
"dev-052978d6-us-east-2-data-5",
"dev-052978d6-us-east-2-data-4",
"dev-052978d6-us-east-2-data-3",
},
"cpu": {
"cpu-total",
"cpu0",
},
},
GroupBy: chronograf.GroupBy{
Time: "",
Tags: []string{},
},
AreTagsAccepted: true,
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test explicit non-null fill accepted",
influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(linear)`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "mean",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
},
GroupBy: chronograf.GroupBy{
Time: "1m",
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
Fill: "linear",
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test explicit null fill accepted",
influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(null)`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "mean",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
},
GroupBy: chronograf.GroupBy{
Time: "1m",
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
Fill: "null",
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test implicit null fill accepted and made explicit",
influxQL: `SELECT mean("usage_idle") as "mean_usage_idle" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m)`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "mean",
Type: "func",
Alias: "mean_usage_idle",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
},
GroupBy: chronograf.GroupBy{
Time: "1m",
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
Fill: "null",
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test percentile with a number parameter",
influxQL: `SELECT percentile("usage_idle", 3.14) as "mean_usage_idle" FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m)`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "percentile",
Type: "func",
Alias: "mean_usage_idle",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
{
Value: "3.14",
Type: "number",
},
},
},
},
GroupBy: chronograf.GroupBy{
Time: "1m",
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
Fill: "null",
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test top with 2 arguments",
influxQL: `SELECT TOP("water_level","location",2) FROM "h2o_feet"`,
want: chronograf.QueryConfig{
Measurement: "h2o_feet",
Fields: []chronograf.Field{
{
Value: "top",
Type: "func",
Args: []chronograf.Field{
{
Value: "water_level",
Type: "field",
},
{
Value: "location",
Type: "field",
},
{
Value: "2",
Type: "integer",
},
},
},
},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
},
},
{
name: "count of a regex",
influxQL: ` SELECT COUNT(/water/) FROM "h2o_feet"`,
want: chronograf.QueryConfig{
Measurement: "h2o_feet",
Fields: []chronograf.Field{
{
Value: "count",
Type: "func",
Args: []chronograf.Field{
{
Value: "water",
Type: "regex",
},
},
},
},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
},
},
{
name: "count with aggregate",
influxQL: `SELECT COUNT(water) as "count_water" FROM "h2o_feet"`,
want: chronograf.QueryConfig{
Measurement: "h2o_feet",
Fields: []chronograf.Field{
{
Value: "count",
Type: "func",
Alias: "count_water",
Args: []chronograf.Field{
{
Value: "water",
Type: "field",
},
},
},
},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
},
},
{
name: "count of a wildcard",
influxQL: ` SELECT COUNT(*) FROM "h2o_feet"`,
want: chronograf.QueryConfig{
Measurement: "h2o_feet",
Fields: []chronograf.Field{
{
Value: "count",
Type: "func",
Args: []chronograf.Field{
{
Value: "*",
Type: "wildcard",
},
},
},
},
GroupBy: chronograf.GroupBy{
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
},
},
{
name: "Test fill number (int) accepted",
influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(1337)`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "mean",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
},
GroupBy: chronograf.GroupBy{
Time: "1m",
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
Fill: "1337",
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test fill number (float) accepted",
influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(1.337)`,
want: chronograf.QueryConfig{
Database: "telegraf",
Measurement: "cpu",
RetentionPolicy: "autogen",
Fields: []chronograf.Field{
{
Value: "mean",
Type: "func",
Args: []chronograf.Field{
{
Value: "usage_idle",
Type: "field",
},
},
},
},
GroupBy: chronograf.GroupBy{
Time: "1m",
Tags: []string{},
},
Tags: map[string][]string{},
AreTagsAccepted: false,
Fill: "1.337",
Range: &chronograf.DurationRange{
Lower: "now() - 15m",
},
},
},
{
name: "Test invalid fill rejected",
influxQL: `SELECT mean("usage_idle") FROM "telegraf"."autogen"."cpu" WHERE time > now() - 15m GROUP BY time(1m) FILL(LINEAR)`,
wantErr: true,
},
}
// Run every case as a subtest and compare the full structs with go-cmp
// so mismatches print a readable field-level diff.
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := Convert(tt.influxQL)
if (err != nil) != tt.wantErr {
t.Errorf("Convert() error = %v, wantErr %v", err, tt.wantErr)
return
}
// When raw-text fallback is expected, Convert must echo the original
// statement back verbatim.
if tt.RawText != "" {
tt.want.RawText = &tt.RawText
if got.RawText == nil {
t.Errorf("Convert() = nil, want %s", tt.RawText)
} else if *got.RawText != tt.RawText {
t.Errorf("Convert() = %s, want %s", *got.RawText, tt.RawText)
}
}
if !cmp.Equal(got, tt.want) {
t.Errorf("Convert() = %s", cmp.Diff(got, tt.want))
}
})
}
}
// TestParseTime verifies that ParseTime computes the width of a query's
// time window relative to a fixed reference instant.
func TestParseTime(t *testing.T) {
	cases := []struct {
		name     string
		influxQL string
		now      string
		want     time.Duration
		wantErr  bool
	}{
		{
			name:     "time equal",
			now:      "2000-01-01T00:00:00Z",
			influxQL: `SELECT mean("numSeries") AS "mean_numSeries" FROM "_internal"."monitor"."database" WHERE time > now() - 1h and time < now() - 1h GROUP BY :interval: FILL(null);`,
			want:     0,
		},
		{
			name:     "time shifted by one hour",
			now:      "2000-01-01T00:00:00Z",
			influxQL: `SELECT mean("numSeries") AS "mean_numSeries" FROM "_internal"."monitor"."database" WHERE time > now() - 1h - 1h and time < now() - 1h GROUP BY :interval: FILL(null);`,
			want:     3599999999998,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ref, err := time.Parse(time.RFC3339, tc.now)
			if err != nil {
				t.Fatalf("%v", err)
			}
			got, err := ParseTime(tc.influxQL, ref)
			if (err != nil) != tc.wantErr {
				t.Errorf("ParseTime() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if got != tc.want {
				t.Logf("%d", got)
				t.Errorf("ParseTime() = %v, want %v", got, tc.want)
			}
		})
	}
}

View File

@ -1,230 +0,0 @@
package influx
import (
"context"
"encoding/json"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Add creates a new InfluxDB user with the given name and password, grants
// each of the requested permissions, and returns the stored user as read
// back from the database.
func (c *Client) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
	// NOTE(review): name and password are interpolated directly into the
	// statement; a quote character in either would break or alter the
	// query. Confirm inputs are validated/escaped upstream.
	_, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`CREATE USER "%s" WITH PASSWORD '%s'`, u.Name, u.Passwd),
	})
	if err != nil {
		return nil, err
	}
	// Each permission requires its own GRANT statement.
	for _, p := range u.Permissions {
		if err := c.grantPermission(ctx, u.Name, p); err != nil {
			return nil, err
		}
	}
	return c.Get(ctx, chronograf.UserQuery{Name: &u.Name})
}
// Delete removes the user from InfluxDB. DROP USER reports failures inside
// the result payload rather than via the query error, so the results are
// unmarshaled and inspected for embedded error strings.
func (c *Client) Delete(ctx context.Context, u *chronograf.User) error {
	res, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`DROP USER "%s"`, u.Name),
	})
	if err != nil {
		return err
	}
	// The DROP USER statement puts the error within the results itself
	// So, we have to crack open the results to see what happens
	octets, err := res.MarshalJSON()
	if err != nil {
		return err
	}
	results := make([]struct{ Error string }, 0)
	if err := json.Unmarshal(octets, &results); err != nil {
		return err
	}
	// At last, we can check if there are any error strings
	for _, r := range results {
		if r.Error != "" {
			// Pass the server message as a value, not a format string:
			// fmt.Errorf(r.Error) would misinterpret any '%' in it
			// (and is flagged by go vet).
			return fmt.Errorf("%s", r.Error)
		}
	}
	return nil
}
// Get retrieves the user named in q, with its permissions attached.
// It returns chronograf.ErrUserNotFound when no such user exists.
func (c *Client) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) {
	if q.Name == nil {
		return nil, fmt.Errorf("query must specify name")
	}
	users, err := c.showUsers(ctx)
	if err != nil {
		return nil, err
	}
	for _, user := range users {
		if user.Name != *q.Name {
			continue
		}
		perms, err := c.userPermissions(ctx, user.Name)
		if err != nil {
			return nil, err
		}
		user.Permissions = append(user.Permissions, perms...)
		return &user, nil
	}
	// Use the package sentinel (same "user not found" message as before)
	// so callers can detect the condition with a direct comparison instead
	// of string matching.
	return nil, chronograf.ErrUserNotFound
}
// Update changes either the user's password or its permissions. A non-empty
// password takes precedence: it is applied and the call returns without
// touching permissions. Otherwise the stored permissions are diffed against
// the requested set, missing ones granted and stale ones revoked.
func (c *Client) Update(ctx context.Context, u *chronograf.User) error {
	if u.Passwd != "" {
		return c.updatePassword(ctx, u.Name, u.Passwd)
	}
	current, err := c.Get(ctx, chronograf.UserQuery{Name: &u.Name})
	if err != nil {
		return err
	}
	toRevoke, toGrant := Difference(u.Permissions, current.Permissions)
	for _, p := range toGrant {
		if err := c.grantPermission(ctx, u.Name, p); err != nil {
			return err
		}
	}
	for _, p := range toRevoke {
		if err := c.revokePermission(ctx, u.Name, p); err != nil {
			return err
		}
	}
	return nil
}
// All returns every user in InfluxDB, with each user's permissions
// looked up and attached.
func (c *Client) All(ctx context.Context) ([]chronograf.User, error) {
	users, err := c.showUsers(ctx)
	if err != nil {
		return nil, err
	}
	// SHOW USERS does not include grants, so fetch them per user.
	for i := range users {
		perms, err := c.userPermissions(ctx, users[i].Name)
		if err != nil {
			return nil, err
		}
		users[i].Permissions = append(users[i].Permissions, perms...)
	}
	return users, nil
}
// Num reports how many users exist in the database.
func (c *Client) Num(ctx context.Context) (int, error) {
	users, err := c.All(ctx)
	if err != nil {
		return 0, err
	}
	return len(users), nil
}
// showUsers issues the SHOW USERS InfluxQL statement and decodes the
// response into chronograf users (without permissions).
func (c *Client) showUsers(ctx context.Context) ([]chronograf.User, error) {
	res, err := c.Query(ctx, chronograf.Query{Command: `SHOW USERS`})
	if err != nil {
		return nil, err
	}
	octets, err := res.MarshalJSON()
	if err != nil {
		return nil, err
	}
	var results showResults
	if err := json.Unmarshal(octets, &results); err != nil {
		return nil, err
	}
	return results.Users(), nil
}
// grantPermission issues the GRANT statement for perm; an empty statement
// from ToGrant means there is nothing to do.
func (c *Client) grantPermission(ctx context.Context, username string, perm chronograf.Permission) error {
	query := ToGrant(username, perm)
	if query == "" {
		return nil
	}
	_, err := c.Query(ctx, chronograf.Query{Command: query})
	return err
}
// revokePermission issues the REVOKE statement for perm; an empty statement
// from ToRevoke means there is nothing to do.
func (c *Client) revokePermission(ctx context.Context, username string, perm chronograf.Permission) error {
	query := ToRevoke(username, perm)
	if query == "" {
		return nil
	}
	_, err := c.Query(ctx, chronograf.Query{Command: query})
	return err
}
// userPermissions runs SHOW GRANTS for the named user and decodes the
// response into chronograf permissions.
func (c *Client) userPermissions(ctx context.Context, name string) (chronograf.Permissions, error) {
	res, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`SHOW GRANTS FOR "%s"`, name),
	})
	if err != nil {
		return nil, err
	}
	octets, err := res.MarshalJSON()
	if err != nil {
		return nil, err
	}
	var results showResults
	if err := json.Unmarshal(octets, &results); err != nil {
		return nil, err
	}
	return results.Permissions(), nil
}
// updatePassword sets the user's password. SET PASSWORD reports failures
// inside the result payload rather than via the query error, so the results
// are unmarshaled and inspected for embedded error strings.
func (c *Client) updatePassword(ctx context.Context, name, passwd string) error {
	res, err := c.Query(ctx, chronograf.Query{
		Command: fmt.Sprintf(`SET PASSWORD for "%s" = '%s'`, name, passwd),
	})
	if err != nil {
		return err
	}
	// The SET PASSWORD statements puts the error within the results itself
	// So, we have to crack open the results to see what happens
	octets, err := res.MarshalJSON()
	if err != nil {
		return err
	}
	results := make([]struct{ Error string }, 0)
	if err := json.Unmarshal(octets, &results); err != nil {
		return err
	}
	// At last, we can check if there are any error strings
	for _, r := range results {
		if r.Error != "" {
			// Pass the server message as a value, not a format string:
			// fmt.Errorf(r.Error) would misinterpret any '%' in it
			// (and is flagged by go vet).
			return fmt.Errorf("%s", r.Error)
		}
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,159 +0,0 @@
package chronograf
import "encoding/json"
// AlertNodes defines all possible kapacitor interactions with an alert.
// Each slice holds zero or more handlers of that kind; MarshalJSON adds the
// "typeOf":"alert" discriminator when the struct is serialized.
type AlertNodes struct {
IsStateChangesOnly bool `json:"stateChangesOnly"` // IsStateChangesOnly will only send alerts on state changes.
UseFlapping bool `json:"useFlapping"` // UseFlapping enables flapping detection. Flapping occurs when a service or host changes state too frequently, resulting in a storm of problem and recovery notifications.
Posts []*Post `json:"post"` // Posts will POST the JSON alert data to the specified URLs.
TCPs []*TCP `json:"tcp"` // TCPs will send the JSON alert data to the specified endpoints via TCP.
Email []*Email `json:"email"` // Email will send alert data to the specified emails.
Exec []*Exec `json:"exec"` // Exec will run shell commands when an alert triggers.
Log []*Log `json:"log"` // Log will log JSON alert data to files in JSON lines format.
VictorOps []*VictorOps `json:"victorOps"` // VictorOps will send alert to all VictorOps
PagerDuty []*PagerDuty `json:"pagerDuty"` // PagerDuty will send alert to all PagerDuty
PagerDuty2 []*PagerDuty `json:"pagerDuty2"` // PagerDuty2 will send alert to PagerDuty v2
Pushover []*Pushover `json:"pushover"` // Pushover will send alert to all Pushover
Sensu []*Sensu `json:"sensu"` // Sensu will send alert to all Sensu
Slack []*Slack `json:"slack"` // Slack will send alert to Slack
Telegram []*Telegram `json:"telegram"` // Telegram will send alert to all Telegram
HipChat []*HipChat `json:"hipChat"` // HipChat will send alert to all HipChat
Alerta []*Alerta `json:"alerta"` // Alerta will send alert to all Alerta
OpsGenie []*OpsGenie `json:"opsGenie"` // OpsGenie will send alert to all OpsGenie
OpsGenie2 []*OpsGenie `json:"opsGenie2"` // OpsGenie2 will send alert to all OpsGenie v2
Talk []*Talk `json:"talk"` // Talk will send alert to all Talk
Kafka []*Kafka `json:"kafka"` // Kafka will send alert to all Kafka
}
// Post will POST alerts to a destination URL
type Post struct {
URL string `json:"url"` // URL is the destination of the POST.
Headers map[string]string `json:"headers"` // Headers are added to the output POST
}
// Log sends the output of the alert to a file
type Log struct {
FilePath string `json:"filePath"` // Absolute path to the log file; it will be created if it does not exist.
}
// Alerta sends the output of the alert to an alerta service
type Alerta struct {
Token string `json:"token"` // Token is the authentication token that overrides the global configuration.
Resource string `json:"resource"` // Resource under alarm, deliberately not host-centric
Event string `json:"event"` // Event is the event name eg. NodeDown, QUEUE:LENGTH:EXCEEDED
Environment string `json:"environment"` // Environment is the affected environment; used to namespace the resource
Group string `json:"group"` // Group is an event group used to group events of similar type
Value string `json:"value"` // Value is the event value eg. 100%, Down, PingFail, 55ms, ORA-1664
Origin string `json:"origin"` // Origin is the name of monitoring component that generated the alert
Service []string `json:"service"` // Service is the list of affected services
}
// Exec executes a shell command on an alert
type Exec struct {
Command []string `json:"command"` // Command is the space separated command and args to execute.
}
// TCP sends the alert to the address
type TCP struct {
Address string `json:"address"` // Address is the host and port to send the alert to
}
// Email sends the alert to a list of email addresses
type Email struct {
To []string `json:"to"` // To is the list of email recipients.
}
// VictorOps sends alerts to the victorops.com service
type VictorOps struct {
RoutingKey string `json:"routingKey"` // RoutingKey is what is used to map the alert to a team
}
// PagerDuty sends alerts to the pagerduty.com service
type PagerDuty struct {
ServiceKey string `json:"serviceKey"` // ServiceKey is the GUID of one of the "Generic API" integrations
}
// HipChat sends alerts to the HipChat service
type HipChat struct {
Room string `json:"room"` // Room is the HipChat room to post messages.
Token string `json:"token"` // Token is the HipChat authentication token.
}
// Sensu sends alerts to sensu or sensuapp.org
type Sensu struct {
Source string `json:"source"` // Source is the check source, used to create a proxy client for an external resource
Handlers []string `json:"handlers"` // Handlers are the Sensu event handlers for taking action on events
}
// Pushover sends alerts to pushover.net
type Pushover struct {
// UserKey is the User/Group key of your user (or you), viewable when logged
// into the Pushover dashboard. Often referred to as USER_KEY
// in the Pushover documentation.
UserKey string `json:"userKey"`
// Device is the users device name to send message directly to that device,
// rather than all of a user's devices (multiple device names may
// be separated by a comma)
Device string `json:"device"`
// Title is your message's title, otherwise your apps name is used
Title string `json:"title"`
// URL is a supplementary URL to show with your message
URL string `json:"url"`
// URLTitle is a title for your supplementary URL, otherwise just URL is shown
URLTitle string `json:"urlTitle"`
// Sound is the name of one of the sounds supported by the device clients to override
// the user's default sound choice
Sound string `json:"sound"`
}
// Slack sends alerts to a slack.com channel
type Slack struct {
Channel string `json:"channel"` // Slack channel in which to post messages.
Username string `json:"username"` // Username of the Slack bot.
IconEmoji string `json:"iconEmoji"` // IconEmoji is an emoji name surrounded in ':' characters; The emoji image will replace the normal user icon for the slack bot.
Workspace string `json:"workspace"` // Workspace is the slack workspace for the alert handler
}
// Telegram sends alerts to telegram.org
type Telegram struct {
ChatID string `json:"chatId"` // ChatID is the Telegram user/group ID to post messages to.
ParseMode string `json:"parseMode"` // ParseMode tells telegram how to render the message (Markdown or HTML)
DisableWebPagePreview bool `json:"disableWebPagePreview"` // DisableWebPagePreview disables link previews in alert messages.
DisableNotification bool `json:"disableNotification"` // DisableNotification disables notifications on iOS devices and sounds on Android devices. Android users continue to receive notifications.
}
// OpsGenie sends alerts to opsgenie.com
type OpsGenie struct {
Teams []string `json:"teams"` // Teams that the alert will be routed to send notifications
Recipients []string `json:"recipients"` // Recipients can be a single user, group, escalation, or schedule (https://docs.opsgenie.com/docs/alert-recipients-and-teams)
}
// Talk sends alerts to Jane Talk (https://jianliao.com/site)
type Talk struct{}
// Kafka sends alerts to any Kafka brokers specified in the handler config
type Kafka struct {
Cluster string `json:"cluster"`
Topic string `json:"kafka-topic"`
Template string `json:"template"`
}
// MarshalJSON serializes the alert nodes with an extra "typeOf":"alert"
// discriminator field injected alongside the struct's own fields.
func (n *AlertNodes) MarshalJSON() ([]byte, error) {
	// Alias sheds the MarshalJSON method so json.Marshal below does not
	// recurse into this function.
	type Alias AlertNodes
	wrapper := struct {
		Type string `json:"typeOf"`
		*Alias
	}{
		Type:  "alert",
		Alias: (*Alias)(n),
	}
	return json.Marshal(&wrapper)
}

View File

@ -1,88 +0,0 @@
package mocks
import (
"fmt"
"io"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// NewLogger constructs an empty TestLogger, exposed through the
// chronograf.Logger interface.
func NewLogger() chronograf.Logger {
	tl := &TestLogger{}
	return tl
}
// LogMessage is one recorded log entry: the level it was logged at and the
// rendered message body.
type LogMessage struct {
Level string // "debug", "info", or "error"
Body string // space-joined stringified arguments
}
// TestLogger is a chronograf.Logger which allows assertions to be made on the
// contents of its messages.
type TestLogger struct {
Messages []LogMessage // every message recorded, in call order
}
// Debug records args as a debug-level message.
func (tl *TestLogger) Debug(args ...interface{}) {
tl.Messages = append(tl.Messages, LogMessage{"debug", tl.stringify(args...)})
}
// Info records args as an info-level message.
func (tl *TestLogger) Info(args ...interface{}) {
tl.Messages = append(tl.Messages, LogMessage{"info", tl.stringify(args...)})
}
// Error records args as an error-level message.
func (tl *TestLogger) Error(args ...interface{}) {
tl.Messages = append(tl.Messages, LogMessage{"error", tl.stringify(args...)})
}
// WithField returns the logger unchanged; this mock does not capture
// structured fields.
func (tl *TestLogger) WithField(key string, value interface{}) chronograf.Logger {
return tl
}
// Writer returns the write end of a fresh pipe. NOTE(review): the read end
// is discarded, so any write to the returned PipeWriter would block until
// read — presumably nothing writes to it in tests; confirm before relying
// on it.
func (tl *TestLogger) Writer() *io.PipeWriter {
_, write := io.Pipe()
return write
}
// HasMessage reports whether an exact (level, body) pair has been recorded
// by this logger.
func (tl *TestLogger) HasMessage(level string, body string) bool {
	for i := range tl.Messages {
		if tl.Messages[i].Level == level && tl.Messages[i].Body == body {
			return true
		}
	}
	return false
}
func (tl *TestLogger) stringify(args ...interface{}) string {
out := []byte{}
for _, arg := range args[:len(args)-1] {
out = append(out, tl.stringifyArg(arg)...)
out = append(out, []byte(" ")...)
}
out = append(out, tl.stringifyArg(args[len(args)-1])...)
return string(out)
}
// stringifyArg converts a single log argument to bytes. Stringer is
// preferred over error, which is preferred over a plain string; anything
// else renders as "UNKNOWN".
func (tl *TestLogger) stringifyArg(arg interface{}) []byte {
	if s, ok := arg.(fmt.Stringer); ok {
		return []byte(s.String())
	}
	if e, ok := arg.(error); ok {
		return []byte(e.Error())
	}
	if s, ok := arg.(string); ok {
		return []byte(s)
	}
	return []byte("UNKNOWN")
}
// Dump writes every recorded message into the given test's log output.
func (tl *TestLogger) Dump(t *testing.T) {
	t.Log("== Dumping Test Logs ==")
	for i := range tl.Messages {
		t.Logf("lvl: %s, msg: %s", tl.Messages[i].Level, tl.Messages[i].Body)
	}
}

View File

@ -1,20 +0,0 @@
package mocks
// NewResponse builds a mocked chronograf.Response whose MarshalJSON yields
// the given payload and error.
func NewResponse(res string, err error) *Response {
	r := Response{res: res, err: err}
	return &r
}
// Response is a mocked chronograf.Response that returns a canned payload
// and error from MarshalJSON.
type Response struct {
res string // raw JSON payload returned by MarshalJSON
err error // error returned by MarshalJSON
}
// MarshalJSON returns the canned payload bytes and error verbatim.
func (r *Response) MarshalJSON() ([]byte, error) {
return []byte(r.res), r.err
}

View File

@ -1,60 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Compile-time check that the mock satisfies chronograf.TimeSeries.
var _ chronograf.TimeSeries = &TimeSeries{}
// TimeSeries is a mockable chronograf time series by overriding the functions.
// Each interface method delegates directly to the corresponding F field, so a
// test must set every function field it exercises (a nil field panics).
type TimeSeries struct {
// Connect will connect to the time series using the information in `Source`.
ConnectF func(context.Context, *chronograf.Source) error
// Query retrieves time series data from the database.
QueryF func(context.Context, chronograf.Query) (chronograf.Response, error)
// Write records points into the TimeSeries
WriteF func(context.Context, []chronograf.Point) error
// UsersStore represents the user accounts within the TimeSeries database
UsersF func(context.Context) chronograf.UsersStore
// Permissions returns all valid names permissions in this database
PermissionsF func(context.Context) chronograf.Permissions
// RolesF represents the roles. Roles group permissions and Users
RolesF func(context.Context) (chronograf.RolesStore, error)
}
// New implements TimeSeriesClient; the source and logger are ignored and
// the mock itself is returned, so the same overrides apply everywhere.
func (t *TimeSeries) New(chronograf.Source, chronograf.Logger) (chronograf.TimeSeries, error) {
	return t, nil
}
// Connect delegates to the ConnectF override (nil ConnectF panics).
func (t *TimeSeries) Connect(ctx context.Context, src *chronograf.Source) error {
	return t.ConnectF(ctx, src)
}
// Query delegates to the QueryF override (nil QueryF panics).
func (t *TimeSeries) Query(ctx context.Context, query chronograf.Query) (chronograf.Response, error) {
	return t.QueryF(ctx, query)
}
// Write delegates to the WriteF override (nil WriteF panics).
func (t *TimeSeries) Write(ctx context.Context, points []chronograf.Point) error {
	return t.WriteF(ctx, points)
}
// Users delegates to the UsersF override (nil UsersF panics).
func (t *TimeSeries) Users(ctx context.Context) chronograf.UsersStore {
	return t.UsersF(ctx)
}
// Roles delegates to the RolesF override (nil RolesF panics).
func (t *TimeSeries) Roles(ctx context.Context) (chronograf.RolesStore, error) {
	return t.RolesF(ctx)
}
// Permissions delegates to the PermissionsF override (nil PermissionsF panics).
func (t *TimeSeries) Permissions(ctx context.Context) chronograf.Permissions {
	return t.PermissionsF(ctx)
}

View File

@ -763,7 +763,6 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
HTTPErrorHandler: kithttp.ErrorHandler(0),
Logger: m.log,
SessionRenewDisabled: opts.SessionRenewDisabled,
NewBucketService: source.NewBucketService,
NewQueryService: source.NewQueryService,
PointsWriter: &storage.LoggingPointsWriter{
Underlying: pointsWriter,

2
go.mod
View File

@ -70,7 +70,7 @@ require (
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
github.com/retailnext/hllpp v1.0.0
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
github.com/spf13/cast v1.3.0
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5

View File

@ -54,8 +54,7 @@ type APIBackend struct {
// write request. A value of zero specifies there is no limit.
WriteParserMaxValues int
NewBucketService func(*influxdb.Source) (influxdb.BucketService, error)
NewQueryService func(*influxdb.Source) (query.ProxyQueryService, error)
NewQueryService func(*influxdb.Source) (query.ProxyQueryService, error)
WriteEventRecorder metric.EventRecorder
QueryEventRecorder metric.EventRecorder

View File

@ -1,80 +0,0 @@
package influxdb
import (
"context"
"fmt"
"time"
platform2 "github.com/influxdata/influxdb/v2/kit/platform"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/kit/tracing"
)
// BucketService connects to Influx via HTTP using tokens to manage buckets.
// Only FindBuckets is implemented for v1 sources; every other operation
// returns a "not supported" error.
type BucketService struct {
	Source *platform.Source // v1 source holding the connection URL and credentials
}
// FindBucketByName is not supported for v1 sources and always errors.
func (s *BucketService) FindBucketByName(ctx context.Context, orgID platform2.ID, n string) (*platform.Bucket, error) {
	return nil, fmt.Errorf("not supported")
}
// FindBucketByID is not supported for v1 sources and always errors.
func (s *BucketService) FindBucketByID(ctx context.Context, id platform2.ID) (*platform.Bucket, error) {
	return nil, fmt.Errorf("not supported")
}
// FindBucket is not supported for v1 sources and always errors.
func (s *BucketService) FindBucket(ctx context.Context, filter platform.BucketFilter) (*platform.Bucket, error) {
	return nil, fmt.Errorf("not supported")
}
// FindBuckets lists every database and retention policy on the v1 source
// and presents each (database, retention policy) pair as a bucket. The
// filter and find options are ignored; all pairs are returned.
//
// Errors from the upstream client are now wrapped with %w and the stage
// that failed, so callers can tell whether listing databases, listing a
// policy, or parsing a duration went wrong (previously they were
// returned bare, with no context).
func (s *BucketService) FindBuckets(ctx context.Context, filter platform.BucketFilter, opt ...platform.FindOptions) ([]*platform.Bucket, int, error) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	c, err := newClient(s.Source)
	if err != nil {
		return nil, 0, err
	}

	dbs, err := c.AllDB(ctx)
	if err != nil {
		return nil, 0, fmt.Errorf("listing databases: %w", err)
	}

	bs := []*platform.Bucket{}
	for _, db := range dbs {
		rps, err := c.AllRP(ctx, db.Name)
		if err != nil {
			return nil, 0, fmt.Errorf("listing retention policies for %q: %w", db.Name, err)
		}
		for _, rp := range rps {
			d, err := time.ParseDuration(rp.Duration)
			if err != nil {
				return nil, 0, fmt.Errorf("parsing retention policy duration %q: %w", rp.Duration, err)
			}
			b := &platform.Bucket{
				// TODO(desa): what to do about IDs?
				RetentionPeriod:     d,
				Name:                db.Name,
				RetentionPolicyName: rp.Name,
			}
			bs = append(bs, b)
		}
	}
	return bs, len(bs), nil
}
// CreateBucket is not supported for v1 sources and always errors.
func (s *BucketService) CreateBucket(ctx context.Context, b *platform.Bucket) error {
	return fmt.Errorf("not supported")
}
// UpdateBucket is not supported for v1 sources and always errors.
func (s *BucketService) UpdateBucket(ctx context.Context, id platform2.ID, upd platform.BucketUpdate) (*platform.Bucket, error) {
	return nil, fmt.Errorf("not supported")
}
// DeleteBucket is not supported for v1 sources and always errors.
func (s *BucketService) DeleteBucket(ctx context.Context, id platform2.ID) error {
	return fmt.Errorf("not supported")
}

View File

@ -5,9 +5,6 @@ import (
"net/http"
"net/url"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
"github.com/influxdata/influxdb/v2/kit/tracing"
)
@ -19,37 +16,6 @@ var (
defaultTransport = &http.Transport{}
)
// newClient builds an influx.Client for the given source, wiring up its
// parsed URL, an authorizer derived from the source credentials, the
// source's TLS-verification setting, and a no-op logger.
func newClient(s *platform.Source) (*influx.Client, error) {
	u, err := url.Parse(s.URL)
	if err != nil {
		return nil, err
	}
	return &influx.Client{
		URL:                u,
		Authorizer:         DefaultAuthorization(s),
		InsecureSkipVerify: s.InsecureSkipVerify,
		Logger:             &chronograf.NoopLogger{},
	}, nil
}
// DefaultAuthorization picks an influx.Authorizer from the source's
// credentials: a shared-secret JWT builder when a username and shared
// secret are set, basic auth when a username and password are set, and
// no authorization otherwise. This mirrors the chronograf/influx helper,
// adapted for platform sources.
func DefaultAuthorization(src *platform.Source) influx.Authorizer {
	switch {
	case src.Username != "" && src.SharedSecret != "":
		return &influx.BearerJWT{
			Username:     src.Username,
			SharedSecret: src.SharedSecret,
		}
	case src.Username != "" && src.Password != "":
		return &influx.BasicAuth{
			Username: src.Username,
			Password: src.Password,
		}
	default:
		return &influx.NoAuthorization{}
	}
}
func newURL(addr, path string) (*url.URL, error) {
u, err := url.Parse(addr)
if err != nil {

View File

@ -1,30 +0,0 @@
package source
import (
"fmt"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/http"
"github.com/influxdata/influxdb/v2/http/influxdb"
"github.com/influxdata/influxdb/v2/tenant"
)
// NewBucketService creates a bucket service appropriate for the source's
// type: an error stub for self sources, a tenant HTTP client for v2
// sources, and the v1 adapter for v1 sources. Unknown types error.
func NewBucketService(s *platform.Source) (platform.BucketService, error) {
	switch s.Type {
	case platform.SelfSourceType:
		// TODO(fntlnz): this is supposed to call a bucket service directly
		// locally; it errs for now pending refactoring of how services are
		// instantiated.
		return nil, fmt.Errorf("self source type not implemented")
	case platform.V2SourceType:
		httpClient, err := http.NewHTTPClient(s.URL, s.Token, s.InsecureSkipVerify)
		if err != nil {
			return nil, err
		}
		return &tenant.BucketClientService{Client: httpClient}, nil
	case platform.V1SourceType:
		return &influxdb.BucketService{Source: s}, nil
	default:
		return nil, fmt.Errorf("unsupported source type %s", s.Type)
	}
}