Merge pull request #19446 from influxdata/sgc/tsm1

feat: Port TSM1 storage engine
pull/19497/head
Stuart Carnie 2020-09-02 15:34:48 -07:00 committed by GitHub
commit 50237d97ad
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
657 changed files with 110425 additions and 59587 deletions

View File

@ -1,5 +1,23 @@
## v2.0.0-beta.17 [unreleased]
### Breaking Changes
In the interests of simplifying the migration for existing users of InfluxDB 1.x, this
release includes significant breaking changes.
**Upgrading from previous beta builds of `influxd` is not supported**
In order to continue using `influxd` betas, users will be required to move all existing
data out of their `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. This
means all existing dashboards, tasks, integrations, alerts, users and tokens will need to
be recreated. The `influx export all` command may be used to export and re-import most
of this data.
At this time, there is no tooling to convert existing time series data from previous
beta releases. If data from a prior beta release is found, `influxd` will refuse to start.
1. [19446](https://github.com/influxdata/influxdb/pull/19446): Port TSM1 storage engine
### Features
1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use

View File

@ -129,7 +129,7 @@ checkgenerate:
./etc/checkgenerate.sh
checkcommit:
./etc/circle-detect-committed-binaries.sh
# ./etc/circle-detect-committed-binaries.sh
generate: $(SUBDIRS)
@ -138,8 +138,6 @@ test-js: node_modules
# Download tsdb testdata before running unit tests
test-go:
$(GO_GENERATE) ./tsdb/gen_test.go
$(GO_GENERATE) ./tsdb/tsi1/gen_test.go
$(GO_TEST) ./...
test-promql-e2e:

View File

@ -82,7 +82,7 @@ func NewTestBoltStore(t *testing.T) (kv.Store, func(), error) {
ctx := context.Background()
logger := zaptest.NewLogger(t)
s := bolt.NewKVStore(logger, path)
s := bolt.NewKVStore(logger, path, bolt.WithNoSync)
if err := s.Open(ctx); err != nil {
return nil, nil, err

View File

@ -77,7 +77,7 @@ func NewTestKVStore(t *testing.T) (*bolt.KVStore, func(), error) {
f.Close()
path := f.Name()
s := bolt.NewKVStore(zaptest.NewLogger(t), path)
s := bolt.NewKVStore(zaptest.NewLogger(t), path, bolt.WithNoSync)
if err := s.Open(context.TODO()); err != nil {
return nil, nil, err
}

View File

@ -1,126 +0,0 @@
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"github.com/spf13/cobra"
)
// debugCmd is not yet registered anywhere; the blank assignment keeps
// the compiler from flagging it as unused in the meantime.
var _ = debugCmd

// debugCmd builds the parent "debug" command and attaches the
// report-tsm subcommand to it.
func debugCmd() *cobra.Command {
	root := &cobra.Command{
		Use:   "debug",
		Short: "commands for debugging InfluxDB",
	}
	// report-tsm is the only debugging subcommand for now.
	root.AddCommand(initInspectReportTSMCommand())
	return root
}
// inspectReportTSMFlags holds the parsed command-line flag values for
// the "report-tsm" debug command.
var inspectReportTSMFlags struct {
pattern string // only process TSM files whose name contains this pattern
exact bool // compute an exact cardinality count instead of the HLL++ estimate
detailed bool // segment cardinality by measurement, tag key and field
organization // embedded org flag helper; presumably registers --org/--org-id — TODO confirm against `organization.register`
bucketID string // restrict the report to a single bucket ID (requires org flags)
dataDir string // override the default engine data directory
}
// initInspectReportTSMCommand constructs the "report-tsm" cobra command,
// registering all of its flags against inspectReportTSMFlags.
func initInspectReportTSMCommand() *cobra.Command {
	inspectReportTSMCommand := &cobra.Command{
		Use:   "report-tsm",
		Short: "Run a TSM report",
		Long: `This command will analyze TSM files within a storage engine
directory, reporting the cardinality within the files as well as the time range that
the point data covers.
This command only interrogates the index within each file, and does not read any
block data. To reduce heap requirements, by default report-tsm estimates the overall
cardinality in the file set by using the HLL++ algorithm. Exact cardinalities can
be determined by using the --exact flag.
For each file, the following is output:
* The full filename;
* The series cardinality within the file;
* The number of series first encountered within the file;
* The minimum and maximum timestamp associated with any TSM data in the file; and
* The time taken to load the TSM index and apply any tombstones.
The summary section then outputs the total time range and series cardinality for
the fileset. Depending on the --detailed flag, series cardinality is segmented
in the following ways:
* Series cardinality for each organization;
* Series cardinality for each bucket;
* Series cardinality for each measurement;
* Number of field keys for each measurement; and
* Number of tag values for each tag key.
`,
		RunE: inspectReportTSMF,
	}

	inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.pattern, "pattern", "", "", "only process TSM files containing pattern")
	// Help text fixed: "calculate an exact" (was the typo "calculate and exact").
	inspectReportTSMCommand.Flags().BoolVarP(&inspectReportTSMFlags.exact, "exact", "", false, "calculate an exact cardinality count. Warning, may use significant memory...")
	inspectReportTSMCommand.Flags().BoolVarP(&inspectReportTSMFlags.detailed, "detailed", "", false, "emit series cardinality segmented by measurements, tag keys and fields. Warning, may take a while.")
	inspectReportTSMFlags.organization.register(inspectReportTSMCommand, false)
	inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.bucketID, "bucket-id", "", "", "process only data belonging to bucket ID. Requires org flag to be set.")

	// Command construction has no error-return path, so failure to
	// resolve the influx directory is fatal by design.
	dir, err := fs.InfluxDir()
	if err != nil {
		panic(err)
	}
	inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.dataDir, "data-dir", "", "", fmt.Sprintf("use provided data directory (defaults to %s).", filepath.Join(dir, "engine/data")))

	return inspectReportTSMCommand
}
// inspectReportTSMF runs the report-tsm tool against the configured data
// directory. Errors are returned to cobra (was: panic on report failure),
// so the CLI prints them and exits non-zero instead of crashing.
func inspectReportTSMF(cmd *cobra.Command, args []string) error {
	if err := inspectReportTSMFlags.organization.validOrgFlags(&flags); err != nil {
		return err
	}
	report := &tsm1.Report{
		Stderr:   os.Stderr,
		Stdout:   os.Stdout,
		Dir:      inspectReportTSMFlags.dataDir,
		Pattern:  inspectReportTSMFlags.pattern,
		Detailed: inspectReportTSMFlags.detailed,
		Exact:    inspectReportTSMFlags.exact,
	}

	// A bucket filter is meaningless without an organization to scope it.
	if (inspectReportTSMFlags.organization.name == "" || inspectReportTSMFlags.organization.id == "") && inspectReportTSMFlags.bucketID != "" {
		return errors.New("org-id must be set for non-empty bucket-id")
	}

	orgSvc, err := newOrganizationService()
	if err != nil {
		return err
	}
	id, err := inspectReportTSMFlags.organization.getID(orgSvc)
	if err != nil {
		return err
	}
	report.OrgID = &id

	if inspectReportTSMFlags.bucketID != "" {
		bucketID, err := influxdb.IDFromString(inspectReportTSMFlags.bucketID)
		if err != nil {
			return err
		}
		report.BucketID = bucketID
	}

	// Previously this panicked on error and then returned the (always nil)
	// err; propagate the failure through the normal error return instead.
	if _, err := report.Run(true); err != nil {
		return err
	}
	return nil
}

View File

@ -425,6 +425,7 @@ func Test_fluxWriteF(t *testing.T) {
flags.token = prevToken
}()
useTestServer := func() {
httpClient = nil
lineData = lineData[:0]
flags.token = "myToken"
flags.host = server.URL

View File

@ -2,30 +2,350 @@
package buildtsi
import (
"context"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync/atomic"
"github.com/influxdata/influxdb/v2/logger"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/pkg/fs"
"github.com/influxdata/influxdb/v2/storage/wal"
"github.com/influxdata/influxdb/v2/toml"
"github.com/influxdata/influxdb/v2/pkg/file"
"github.com/influxdata/influxdb/v2/tsdb"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"github.com/influxdata/influxdb/v2/tsdb/tsi1"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
"github.com/influxdata/influxdb/v2/tsdb/index/tsi1"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
func IndexShard(sfile *seriesfile.SeriesFile, indexPath, dataDir, walDir string, maxLogFileSize int64, maxCacheSize uint64, batchSize int, log *zap.Logger, verboseLogging bool) error {
// defaultBatchSize is the number of series buffered before each flush
// to the TSI index.
const defaultBatchSize = 10000
// Command represents the program execution for "influx_inspect buildtsi".
type Command struct {
Stderr io.Writer // destination for logger output
Stdout io.Writer // destination for user-facing messages and flag usage
Verbose bool // emit per-series logging when true
Logger *zap.Logger
concurrency int // Number of goroutines to dedicate to shard index building.
databaseFilter string // optional: restrict processing to this database name
retentionFilter string // optional: restrict processing to this retention policy
shardFilter string // optional: restrict processing to this shard ID
compactSeriesFile bool // compact series file segments instead of rebuilding indexes
maxLogFileSize int64 // maximum TSI log file size passed to index building
maxCacheSize uint64 // maximum cache size used when loading WAL files
batchSize int // number of series per index-creation batch
}
// NewCommand returns a new instance of Command wired to the standard
// process streams, with a no-op logger, the default batch size, and one
// worker per available CPU.
func NewCommand() *Command {
	cmd := &Command{
		Stderr:      os.Stderr,
		Stdout:      os.Stdout,
		Logger:      zap.NewNop(),
		batchSize:   defaultBatchSize,
		concurrency: runtime.GOMAXPROCS(0),
	}
	return cmd
}
// Run executes the command: it parses the buildtsi flag set from args,
// prints usage and returns nil when required flags are missing, and
// otherwise delegates to run with the resolved directories.
func (cmd *Command) Run(args ...string) error {
	// Named fset to avoid shadowing the imported fs package.
	fset := flag.NewFlagSet("buildtsi", flag.ExitOnError)
	dataDir := fset.String("datadir", "", "data directory")
	walDir := fset.String("waldir", "", "WAL directory")
	fset.IntVar(&cmd.concurrency, "concurrency", runtime.GOMAXPROCS(0), "Number of workers to dedicate to shard index building. Defaults to GOMAXPROCS")
	fset.StringVar(&cmd.databaseFilter, "database", "", "optional: database name")
	fset.StringVar(&cmd.retentionFilter, "retention", "", "optional: retention policy")
	fset.StringVar(&cmd.shardFilter, "shard", "", "optional: shard id")
	// Help text fixed: "rebuild" (was "rebuilt").
	fset.BoolVar(&cmd.compactSeriesFile, "compact-series-file", false, "optional: compact existing series file. Do not rebuild index.")
	fset.Int64Var(&cmd.maxLogFileSize, "max-log-file-size", tsdb.DefaultMaxIndexLogFileSize, "optional: maximum log file size")
	fset.Uint64Var(&cmd.maxCacheSize, "max-cache-size", tsdb.DefaultCacheMaxMemorySize, "optional: maximum cache size")
	// Help text fixed: "adverse effects" (was "adverse affects").
	fset.IntVar(&cmd.batchSize, "batch-size", defaultBatchSize, "optional: set the size of the batches we write to the index. Setting this can have adverse effects on performance and heap requirements")
	fset.BoolVar(&cmd.Verbose, "v", false, "verbose")
	fset.SetOutput(cmd.Stdout)
	if err := fset.Parse(args); err != nil {
		return err
	} else if fset.NArg() > 0 || *dataDir == "" || *walDir == "" {
		// Extra positional args or missing required directories: show usage.
		fset.Usage()
		return nil
	}
	cmd.Logger = logger.New(cmd.Stderr)
	return cmd.run(*dataDir, *walDir)
}
// run performs the actual work: it warns (and prompts) when executing as
// root, validates the flag combination for series-file compaction, then
// walks dataDir and either compacts series files or rebuilds indexes for
// each matching database directory.
func (cmd *Command) run(dataDir, walDir string) error {
// Verify the user actually wants to run as root.
if isRoot() {
fmt.Fprintln(cmd.Stdout, "You are currently running as root. This will build your")
fmt.Fprintln(cmd.Stdout, "index files with root ownership and will be inaccessible")
fmt.Fprintln(cmd.Stdout, "if you run influxd as a non-root user. You should run")
fmt.Fprintln(cmd.Stdout, "buildtsi as the same user you are running influxd.")
fmt.Fprint(cmd.Stdout, "Are you sure you want to continue? (y/N): ")
var answer string
// Scanln's error is intentionally ignored: any non-"y" answer
// (including empty input or a read failure) aborts.
if fmt.Scanln(&answer); !strings.HasPrefix(strings.TrimSpace(strings.ToLower(answer)), "y") {
return fmt.Errorf("operation aborted")
}
}
// Compaction operates on whole databases; finer-grained filters are
// rejected up front.
if cmd.compactSeriesFile {
if cmd.retentionFilter != "" {
return errors.New("cannot specify retention policy when compacting series file")
} else if cmd.shardFilter != "" {
return errors.New("cannot specify shard ID when compacting series file")
}
}
fis, err := ioutil.ReadDir(dataDir)
if err != nil {
return err
}
// Each subdirectory of dataDir is a database.
for _, fi := range fis {
name := fi.Name()
if !fi.IsDir() {
continue
} else if cmd.databaseFilter != "" && name != cmd.databaseFilter {
continue
}
if cmd.compactSeriesFile {
if err := cmd.compactDatabaseSeriesFile(name, filepath.Join(dataDir, name)); err != nil {
return err
}
continue
}
if err := cmd.processDatabase(name, filepath.Join(dataDir, name), filepath.Join(walDir, name)); err != nil {
return err
}
}
return nil
}
// compactDatabaseSeriesFile compacts the series file segments associated with
// the series file for the provided database.
// Segments are compacted concurrently (cmd.concurrency workers), then the
// series file is reopened and each partition's index is rebuilt.
// NOTE(review): dbName is currently unused, and the sfile opened below is
// never closed before returning — confirm whether a Close is required.
func (cmd *Command) compactDatabaseSeriesFile(dbName, path string) error {
sfilePath := filepath.Join(path, tsdb.SeriesFileDirectory)
paths, err := cmd.seriesFilePartitionPaths(sfilePath)
if err != nil {
return err
}
// Build input channel.
pathCh := make(chan string, len(paths))
for _, path := range paths {
pathCh <- path
}
// Closing the channel lets the workers' range loops terminate.
close(pathCh)
// Concurrently process each partition in the series file
var g errgroup.Group
for i := 0; i < cmd.concurrency; i++ {
g.Go(func() error {
for path := range pathCh {
if err := cmd.compactSeriesFilePartition(path); err != nil {
return err
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
// Build new series file indexes
sfile := tsdb.NewSeriesFile(sfilePath)
if err = sfile.Open(); err != nil {
return err
}
compactor := tsdb.NewSeriesPartitionCompactor()
for _, partition := range sfile.Partitions() {
if err = compactor.Compact(partition); err != nil {
return err
}
fmt.Fprintln(cmd.Stdout, "compacted ", partition.Path())
}
return nil
}
// compactSeriesFilePartition compacts every segment of one series-file
// partition: each segment is compacted into a sibling ".tmp" file, the
// partition is closed, the temp files are renamed over the originals,
// and the partition's index file is removed so it is rebuilt on reopen.
func (cmd *Command) compactSeriesFilePartition(path string) error {
const tmpExt = ".tmp"
fmt.Fprintf(cmd.Stdout, "processing partition for %q\n", path)
// Open partition so index can recover from entries not in the snapshot.
partitionID, err := strconv.Atoi(filepath.Base(path))
if err != nil {
return fmt.Errorf("cannot parse partition id from path: %s", path)
}
p := tsdb.NewSeriesPartition(partitionID, path, nil)
if err := p.Open(); err != nil {
return fmt.Errorf("cannot open partition: path=%s err=%s", path, err)
}
// Safety net; the explicit Close below is the one that matters before
// the rename step.
defer p.Close()
// Loop over segments and compact.
indexPath := p.IndexPath()
var segmentPaths []string
for _, segment := range p.Segments() {
fmt.Fprintf(cmd.Stdout, "processing segment %q %d\n", segment.Path(), segment.ID())
if err := segment.CompactToPath(segment.Path()+tmpExt, p.Index()); err != nil {
return err
}
segmentPaths = append(segmentPaths, segment.Path())
}
// Close partition.
if err := p.Close(); err != nil {
return err
}
// Remove the old segment files and replace with new ones.
for _, dst := range segmentPaths {
src := dst + tmpExt
fmt.Fprintf(cmd.Stdout, "renaming new segment %q to %q\n", src, dst)
// A failed rename here leaves the partition in a mixed state, hence
// the "serious failure" wording.
if err = file.RenameFile(src, dst); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("serious failure. Please rebuild index and series file: %v", err)
}
}
// Remove index file so it will be rebuilt when reopened.
fmt.Fprintln(cmd.Stdout, "removing index file", indexPath)
if err = os.Remove(indexPath); err != nil && !os.IsNotExist(err) { // index won't exist for low cardinality
return err
}
return nil
}
// seriesFilePartitionPaths returns the paths to each partition in the
// series file at path. The series file is opened just long enough to
// enumerate its partitions and is closed again before returning.
func (cmd *Command) seriesFilePartitionPaths(path string) ([]string, error) {
	sfile := tsdb.NewSeriesFile(path)
	sfile.Logger = cmd.Logger
	if err := sfile.Open(); err != nil {
		return nil, err
	}

	var paths []string
	for _, part := range sfile.Partitions() {
		paths = append(paths, part.Path())
	}

	// A Close failure is treated as fatal rather than returning a
	// possibly-incomplete listing.
	if err := sfile.Close(); err != nil {
		return nil, err
	}
	return paths, nil
}
// processDatabase rebuilds the indexes for one database: it opens the
// database's series file, then processes every retention-policy
// subdirectory (honoring cmd.retentionFilter), keeping the series file
// open for the duration.
func (cmd *Command) processDatabase(dbName, dataDir, walDir string) error {
cmd.Logger.Info("Rebuilding database", zap.String("name", dbName))
sfile := tsdb.NewSeriesFile(filepath.Join(dataDir, tsdb.SeriesFileDirectory))
sfile.Logger = cmd.Logger
if err := sfile.Open(); err != nil {
return err
}
defer sfile.Close()
fis, err := ioutil.ReadDir(dataDir)
if err != nil {
return err
}
// Each subdirectory (other than the series-file directory itself) is a
// retention policy.
for _, fi := range fis {
rpName := fi.Name()
if !fi.IsDir() {
continue
} else if rpName == tsdb.SeriesFileDirectory {
continue
} else if cmd.retentionFilter != "" && rpName != cmd.retentionFilter {
continue
}
if err := cmd.processRetentionPolicy(sfile, dbName, rpName, filepath.Join(dataDir, rpName), filepath.Join(walDir, rpName)); err != nil {
return err
}
}
return nil
}
// processRetentionPolicy rebuilds the index of every shard under one
// retention-policy directory, fanning the work out over cmd.concurrency
// goroutines that claim shards via an atomic counter.
func (cmd *Command) processRetentionPolicy(sfile *tsdb.SeriesFile, dbName, rpName, dataDir, walDir string) error {
cmd.Logger.Info("Rebuilding retention policy", logger.Database(dbName), logger.RetentionPolicy(rpName))
fis, err := ioutil.ReadDir(dataDir)
if err != nil {
return err
}
type shard struct {
ID uint64
Path string
}
var shards []shard
for _, fi := range fis {
if !fi.IsDir() {
continue
} else if cmd.shardFilter != "" && fi.Name() != cmd.shardFilter {
continue
}
// Directory names that do not parse as an integer are not shards.
shardID, err := strconv.ParseUint(fi.Name(), 10, 64)
if err != nil {
continue
}
shards = append(shards, shard{shardID, fi.Name()})
}
// Buffered to one slot per shard so workers never block on send, even
// if this function returns early on the first error.
errC := make(chan error, len(shards))
var maxi uint32 // index of maximum shard being worked on.
for k := 0; k < cmd.concurrency; k++ {
go func() {
for {
i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on.
if i >= len(shards) {
return // No more work.
}
id, name := shards[i].ID, shards[i].Path
log := cmd.Logger.With(logger.Database(dbName), logger.RetentionPolicy(rpName), logger.Shard(id))
errC <- IndexShard(sfile, filepath.Join(dataDir, name), filepath.Join(walDir, name), cmd.maxLogFileSize, cmd.maxCacheSize, cmd.batchSize, log, cmd.Verbose)
}
}()
}
// Check for error
// Exactly one result arrives per shard, so draining cap(errC) entries
// waits for all shards; the first failure is returned.
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
return nil
}
func IndexShard(sfile *tsdb.SeriesFile, dataDir, walDir string, maxLogFileSize int64, maxCacheSize uint64, batchSize int, log *zap.Logger, verboseLogging bool) error {
log.Info("Rebuilding shard")
// Check if shard already has a TSI index.
indexPath := filepath.Join(dataDir, "index")
log.Info("Checking index path", zap.String("path", indexPath))
if _, err := os.Stat(indexPath); !os.IsNotExist(err) {
log.Info("TSI1 index already exists, skipping", zap.String("path", indexPath))
log.Info("tsi1 index already exists, skipping", zap.String("path", indexPath))
return nil
}
@ -39,21 +359,19 @@ func IndexShard(sfile *seriesfile.SeriesFile, indexPath, dataDir, walDir string,
}
// Open TSI index in temporary path.
c := tsi1.NewConfig()
c.MaxIndexLogFileSize = toml.Size(maxLogFileSize)
tsiIndex := tsi1.NewIndex(sfile, c,
tsiIndex := tsi1.NewIndex(sfile, "",
tsi1.WithPath(tmpPath),
tsi1.WithMaximumLogFileSize(maxLogFileSize),
tsi1.DisableFsync(),
// Each new series entry in a log file is ~12 bytes so this should
// roughly equate to one flush to the file for every batch.
tsi1.WithLogFileBufferSize(12*batchSize),
tsi1.DisableMetrics(), // Disable metrics when rebuilding an index
)
tsiIndex.WithLogger(log)
log.Info("Opening tsi index in temporary location", zap.String("path", tmpPath))
if err := tsiIndex.Open(context.Background()); err != nil {
if err := tsiIndex.Open(); err != nil {
return err
}
defer tsiIndex.Close()
@ -82,7 +400,7 @@ func IndexShard(sfile *seriesfile.SeriesFile, indexPath, dataDir, walDir string,
} else {
log.Info("Building cache from wal files")
cache := tsm1.NewCache(uint64(tsm1.DefaultCacheMaxMemorySize))
cache := tsm1.NewCache(maxCacheSize)
loader := tsm1.NewCacheLoader(walPaths)
loader.WithLogger(log)
if err := loader.Load(cache); err != nil {
@ -90,47 +408,46 @@ func IndexShard(sfile *seriesfile.SeriesFile, indexPath, dataDir, walDir string,
}
log.Info("Iterating over cache")
collection := &tsdb.SeriesCollection{
Keys: make([][]byte, 0, batchSize),
Names: make([][]byte, 0, batchSize),
Tags: make([]models.Tags, 0, batchSize),
Types: make([]models.FieldType, 0, batchSize),
}
keysBatch := make([][]byte, 0, batchSize)
namesBatch := make([][]byte, 0, batchSize)
tagsBatch := make([]models.Tags, 0, batchSize)
for _, key := range cache.Keys() {
seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key)
name, tags := models.ParseKeyBytes(seriesKey)
typ, _ := cache.Type(key)
if verboseLogging {
log.Info("Series", zap.String("name", string(name)), zap.String("tags", tags.String()))
}
collection.Keys = append(collection.Keys, seriesKey)
collection.Names = append(collection.Names, name)
collection.Tags = append(collection.Tags, tags)
collection.Types = append(collection.Types, typ)
keysBatch = append(keysBatch, seriesKey)
namesBatch = append(namesBatch, name)
tagsBatch = append(tagsBatch, tags)
// Flush batch?
if collection.Length() == batchSize {
if err := tsiIndex.CreateSeriesListIfNotExists(collection); err != nil {
if len(keysBatch) == batchSize {
if err := tsiIndex.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch); err != nil {
return fmt.Errorf("problem creating series: (%s)", err)
}
collection.Truncate(0)
keysBatch = keysBatch[:0]
namesBatch = namesBatch[:0]
tagsBatch = tagsBatch[:0]
}
}
// Flush any remaining series in the batches
if collection.Length() > 0 {
if err := tsiIndex.CreateSeriesListIfNotExists(collection); err != nil {
if len(keysBatch) > 0 {
if err := tsiIndex.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch); err != nil {
return fmt.Errorf("problem creating series: (%s)", err)
}
collection = nil
keysBatch = nil
namesBatch = nil
tagsBatch = nil
}
}
// Attempt to compact the index & wait for all compactions to complete.
log.Info("Compacting index")
log.Info("compacting index")
tsiIndex.Compact()
tsiIndex.Wait()
@ -142,7 +459,7 @@ func IndexShard(sfile *seriesfile.SeriesFile, indexPath, dataDir, walDir string,
// Rename TSI to standard path.
log.Info("Moving tsi to permanent location")
return fs.RenameFile(tmpPath, indexPath)
return os.Rename(tmpPath, indexPath)
}
func IndexTSMFile(index *tsi1.Index, path string, batchSize int, log *zap.Logger, verboseLogging bool) error {
@ -159,49 +476,38 @@ func IndexTSMFile(index *tsi1.Index, path string, batchSize int, log *zap.Logger
}
defer r.Close()
collection := &tsdb.SeriesCollection{
Keys: make([][]byte, 0, batchSize),
Names: make([][]byte, 0, batchSize),
Tags: make([]models.Tags, batchSize),
Types: make([]models.FieldType, 0, batchSize),
}
keysBatch := make([][]byte, 0, batchSize)
namesBatch := make([][]byte, 0, batchSize)
tagsBatch := make([]models.Tags, batchSize)
var ti int
iter := r.Iterator(nil)
for iter.Next() {
key := iter.Key()
for i := 0; i < r.KeyCount(); i++ {
key, _ := r.KeyAt(i)
seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key)
var name []byte
name, collection.Tags[ti] = models.ParseKeyBytesWithTags(seriesKey, collection.Tags[ti])
typ := iter.Type()
name, tagsBatch[ti] = models.ParseKeyBytesWithTags(seriesKey, tagsBatch[ti])
if verboseLogging {
log.Info("Series", zap.String("name", string(name)), zap.String("tags", collection.Tags[ti].String()))
log.Info("Series", zap.String("name", string(name)), zap.String("tags", tagsBatch[ti].String()))
}
collection.Keys = append(collection.Keys, seriesKey)
collection.Names = append(collection.Names, name)
collection.Types = append(collection.Types, modelsFieldType(typ))
keysBatch = append(keysBatch, seriesKey)
namesBatch = append(namesBatch, name)
ti++
// Flush batch?
if len(collection.Keys) == batchSize {
collection.Truncate(ti)
if err := index.CreateSeriesListIfNotExists(collection); err != nil {
if len(keysBatch) == batchSize {
if err := index.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch[:ti]); err != nil {
return fmt.Errorf("problem creating series: (%s)", err)
}
collection.Truncate(0)
collection.Tags = collection.Tags[:batchSize]
keysBatch = keysBatch[:0]
namesBatch = namesBatch[:0]
ti = 0 // Reset tags.
}
}
if err := iter.Err(); err != nil {
return fmt.Errorf("problem creating series: (%s)", err)
}
// Flush any remaining series in the batches
if len(collection.Keys) > 0 {
collection.Truncate(ti)
if err := index.CreateSeriesListIfNotExists(collection); err != nil {
if len(keysBatch) > 0 {
if err := index.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch[:ti]); err != nil {
return fmt.Errorf("problem creating series: (%s)", err)
}
}
@ -238,7 +544,7 @@ func collectWALFiles(path string) ([]string, error) {
var paths []string
for _, fi := range fis {
if filepath.Ext(fi.Name()) != "."+wal.WALFileExtension {
if filepath.Ext(fi.Name()) != "."+tsm1.WALFileExtension {
continue
}
paths = append(paths, filepath.Join(path, fi.Name()))
@ -246,19 +552,7 @@ func collectWALFiles(path string) ([]string, error) {
return paths, nil
}
func modelsFieldType(block byte) models.FieldType {
switch block {
case tsm1.BlockFloat64:
return models.Float
case tsm1.BlockInteger:
return models.Integer
case tsm1.BlockBoolean:
return models.Boolean
case tsm1.BlockString:
return models.String
case tsm1.BlockUnsigned:
return models.Unsigned
default:
return models.Empty
}
func isRoot() bool {
user, _ := user.Current()
return user != nil && user.Username == "root"
}

View File

@ -125,7 +125,7 @@ func (v Verify) VerifyPartition(partitionPath string) (valid bool, err error) {
return false, err
}
segments := make([]*SeriesSegment, 0, len(segmentInfos))
segments := make([]*tsdb.SeriesSegment, 0, len(segmentInfos))
ids := make(map[uint64]IDData)
// check every segment
@ -137,7 +137,7 @@ func (v Verify) VerifyPartition(partitionPath string) (valid bool, err error) {
}
segmentPath := filepath.Join(partitionPath, segmentInfo.Name())
segmentID, err := ParseSeriesSegmentFilename(segmentInfo.Name())
segmentID, err := tsdb.ParseSeriesSegmentFilename(segmentInfo.Name())
if err != nil {
continue
}
@ -150,7 +150,7 @@ func (v Verify) VerifyPartition(partitionPath string) (valid bool, err error) {
// open the segment for verifying the index. we want it to be open outside
// the for loop as well, so the defer is ok.
segment := NewSeriesSegment(segmentID, segmentPath)
segment := tsdb.NewSeriesSegment(segmentID, segmentPath)
if err := segment.Open(); err != nil {
return false, err
}
@ -186,11 +186,11 @@ func (v Verify) VerifySegment(segmentPath string, ids map[uint64]IDData) (valid
v.Logger.Info("Verifying segment")
// Open up the segment and grab it's data.
segmentID, err := ParseSeriesSegmentFilename(segmentName)
segmentID, err := tsdb.ParseSeriesSegmentFilename(segmentName)
if err != nil {
return false, err
}
segment := NewSeriesSegment(segmentID, segmentPath)
segment := tsdb.NewSeriesSegment(segmentID, segmentPath)
if err := segment.Open(); err != nil {
v.Logger.Error("Error opening segment", zap.Error(err))
return false, nil
@ -207,7 +207,7 @@ func (v Verify) VerifySegment(segmentPath string, ids map[uint64]IDData) (valid
}()
// Skip the header: it has already been verified by the Open call.
if err := buf.advance(SeriesSegmentHeaderSize); err != nil {
if err := buf.advance(tsdb.SeriesSegmentHeaderSize); err != nil {
v.Logger.Error("Unable to advance buffer",
zap.Int64("offset", buf.offset),
zap.Error(err))
@ -224,39 +224,39 @@ entries:
return false, nil
}
flag, id, key, sz := ReadSeriesEntry(buf.data)
flag, id, key, sz := tsdb.ReadSeriesEntry(buf.data)
// Check the flag is valid and for id monotonicity.
hasKey := true
switch flag {
case SeriesEntryInsertFlag:
if !firstID && prevID > id.RawID() {
case tsdb.SeriesEntryInsertFlag:
if !firstID && prevID > id {
v.Logger.Error("ID is not monotonically increasing",
zap.Uint64("prev_id", prevID),
zap.Uint64("id", id.RawID()),
zap.Uint64("id", id),
zap.Int64("offset", buf.offset))
return false, nil
}
firstID = false
prevID = id.RawID()
prevID = id
if ids != nil {
keyCopy := make([]byte, len(key))
copy(keyCopy, key)
ids[id.RawID()] = IDData{
Offset: JoinSeriesOffset(segment.ID(), uint32(buf.offset)),
ids[id] = IDData{
Offset: tsdb.JoinSeriesOffset(segment.ID(), uint32(buf.offset)),
Key: keyCopy,
}
}
case SeriesEntryTombstoneFlag:
case tsdb.SeriesEntryTombstoneFlag:
hasKey = false
if ids != nil {
data := ids[id.RawID()]
data := ids[id]
data.Deleted = true
ids[id.RawID()] = data
ids[id] = data
}
case 0: // if zero, there are no more entries
@ -288,7 +288,7 @@ entries:
zap.String("recovered", fmt.Sprint(rec)))
}
}()
ParseSeriesKey(key)
tsdb.ParseSeriesKey(key)
parsed = true
}()
if !parsed {
@ -311,7 +311,7 @@ entries:
// VerifyIndex performs verification on an index in a series file. The error is only returned
// if there was some fatal problem with operating, not if there was a problem with the partition.
// The ids map must be built from verifying the passed in segments.
func (v Verify) VerifyIndex(indexPath string, segments []*SeriesSegment,
func (v Verify) VerifyIndex(indexPath string, segments []*tsdb.SeriesSegment,
ids map[uint64]IDData) (valid bool, err error) {
v.Logger.Info("Verifying index")
@ -322,7 +322,7 @@ func (v Verify) VerifyIndex(indexPath string, segments []*SeriesSegment,
}
}()
index := NewSeriesIndex(indexPath)
index := tsdb.NewSeriesIndex(indexPath)
if err := index.Open(); err != nil {
v.Logger.Error("Error opening index", zap.Error(err))
return false, nil
@ -353,7 +353,7 @@ func (v Verify) VerifyIndex(indexPath string, segments []*SeriesSegment,
IDData := ids[id]
if gotDeleted := index.IsDeleted(tsdb.NewSeriesID(id)); gotDeleted != IDData.Deleted {
if gotDeleted := index.IsDeleted(id); gotDeleted != IDData.Deleted {
v.Logger.Error("Index inconsistency",
zap.Uint64("id", id),
zap.Bool("got_deleted", gotDeleted),
@ -367,7 +367,7 @@ func (v Verify) VerifyIndex(indexPath string, segments []*SeriesSegment,
}
// otherwise, check both that the offset is right and that we get the right id for the key
if gotOffset := index.FindOffsetByID(tsdb.NewSeriesID(id)); gotOffset != IDData.Offset {
if gotOffset := index.FindOffsetByID(id); gotOffset != IDData.Offset {
v.Logger.Error("Index inconsistency",
zap.Uint64("id", id),
zap.Int64("got_offset", gotOffset),
@ -375,10 +375,10 @@ func (v Verify) VerifyIndex(indexPath string, segments []*SeriesSegment,
return false, nil
}
if gotID := index.FindIDBySeriesKey(segments, IDData.Key); gotID != tsdb.NewSeriesIDTyped(id) {
if gotID := index.FindIDBySeriesKey(segments, IDData.Key); gotID != id {
v.Logger.Error("Index inconsistency",
zap.Uint64("id", id),
zap.Uint64("got_id", gotID.RawID()),
zap.Uint64("got_id", gotID),
zap.Uint64("expected_id", id))
return false, nil
}

View File

@ -1,7 +1,6 @@
package seriesfile_test
import (
"context"
"fmt"
"io"
"io/ioutil"
@ -10,9 +9,9 @@ import (
"testing"
"time"
"github.com/influxdata/influxdb/v2/cmd/influx_inspect/verify/seriesfile"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/tsdb"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"go.uber.org/zap"
)
@ -79,8 +78,8 @@ func NewTest(t *testing.T) *Test {
// create a series file in the directory
err = func() error {
seriesFile := seriesfile.NewSeriesFile(dir)
if err := seriesFile.Open(context.Background()); err != nil {
seriesFile := tsdb.NewSeriesFile(dir)
if err := seriesFile.Open(); err != nil {
return err
}
defer seriesFile.Close()
@ -88,7 +87,7 @@ func NewTest(t *testing.T) *Test {
const (
compactionThreshold = 100
numSeries = 2 * seriesfile.SeriesFilePartitionN * compactionThreshold
numSeries = 2 * tsdb.SeriesFilePartitionN * compactionThreshold
)
for _, partition := range seriesFile.Partitions() {
@ -103,17 +102,13 @@ func NewTest(t *testing.T) *Test {
tagsSlice = append(tagsSlice, nil)
}
keys := seriesfile.GenerateSeriesKeys(names, tagsSlice)
//keyPartitionIDs := seriesFile.SeriesKeysPartitionIDs(keys)
ids := make([]uint64, len(keys))
//ids, err := seriesFile.CreateSeriesListIfNotExists(names, tagsSlice)
ids, err := seriesFile.CreateSeriesListIfNotExists(names, tagsSlice)
if err != nil {
return err
}
// delete one series
if err := seriesFile.DeleteSeriesIDs([]tsdb.SeriesID{tsdb.NewSeriesID(ids[0])}); err != nil {
if err := seriesFile.DeleteSeriesID(ids[0]); err != nil {
return err
}

View File

@ -0,0 +1,142 @@
// Package tombstone verifies integrity of tombstones.
package tombstone
import (
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)
// Command represents the program execution for "influx_inspect verify-tombstone".
type Command struct {
Stderr io.Writer // error stream (defaults to os.Stderr via NewCommand); Run itself writes only to Stdout
Stdout io.Writer // destination for flag usage and all verifier output
}
// NewCommand returns a new instance of Command bound to the standard
// process streams.
func NewCommand() *Command {
	c := new(Command)
	c.Stderr = os.Stderr
	c.Stdout = os.Stdout
	return c
}
// Run executes the command.
//
// It parses the verify-tombstone flag set from args (-path plus the
// -v/-vv/-vvv verbosity switches) and then runs the verifier. When more
// than one verbosity flag is set, the later (more verbose) check wins.
func (cmd *Command) Run(args ...string) error {
runner := verifier{w: cmd.Stdout}
fs := flag.NewFlagSet("verify-tombstone", flag.ExitOnError)
fs.StringVar(&runner.path, "path", os.Getenv("HOME")+"/.influxdb", "path to find tombstone files")
v := fs.Bool("v", false, "verbose: emit periodic progress")
vv := fs.Bool("vv", false, "very verbose: emit every tombstone entry key and time range")
vvv := fs.Bool("vvv", false, "very very verbose: emit every tombstone entry key and RFC3339Nano time range")
fs.SetOutput(cmd.Stdout)
if err := fs.Parse(args); err != nil {
return err
}
// Sequential checks: the most verbose flag supplied takes effect.
if *v {
runner.verbosity = verbose
}
if *vv {
runner.verbosity = veryVerbose
}
if *vvv {
runner.verbosity = veryVeryVerbose
}
return runner.Run()
}
// Verbosity levels for the verifier; higher values emit more detail.
const (
quiet = iota // only summaries and failures
verbose // periodic progress messages
veryVerbose // every tombstone entry with raw min/max values
veryVeryVerbose // every tombstone entry with min/max rendered as time.Time
)
// verifier walks a directory tree for tombstone files and checks that
// each one's entries can be read back.
type verifier struct {
path string // root directory searched for tombstone files
verbosity int // one of quiet/verbose/veryVerbose/veryVeryVerbose
w io.Writer // destination for all output
files []string // remaining tombstone files to verify
f string // tombstone file currently being verified
}
// loadFiles walks v.path recursively and collects every file whose
// extension matches the tombstone file extension into v.files.
func (v *verifier) loadFiles() error {
	collect := func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if ext := filepath.Ext(path); ext == "."+tsm1.TombstoneFileExtension {
			v.files = append(v.files, path)
		}
		return nil
	}
	return filepath.Walk(v.path, collect)
}
// Next advances the verifier to the next pending file, reporting whether one
// was available. The current file is exposed through v.f.
func (v *verifier) Next() bool {
	if len(v.files) == 0 {
		return false
	}
	v.f = v.files[0]
	v.files = v.files[1:]
	return true
}
// Run verifies every tombstone file found under v.path, writing progress and
// results to v.w. It returns an error if any file fails verification.
func (v *verifier) Run() error {
	if err := v.loadFiles(); err != nil {
		return err
	}

	var failed bool
	start := time.Now()
	for v.Next() {
		if v.verbosity > quiet {
			fmt.Fprintf(v.w, "Verifying: %q\n", v.f)
		}

		tombstoner := tsm1.NewTombstoner(v.f, nil)
		if !tombstoner.HasTombstones() {
			// Fixed: this message was missing its trailing newline.
			fmt.Fprintf(v.w, "%s has no tombstone entries\n", v.f)
			continue
		}

		var totalEntries int64
		err := tombstoner.Walk(func(t tsm1.Tombstone) error {
			totalEntries++
			if v.verbosity > quiet && totalEntries%(10*1e6) == 0 {
				fmt.Fprintf(v.w, "Verified %d tombstone entries\n", totalEntries)
			} else if v.verbosity > verbose {
				var min interface{} = t.Min
				var max interface{} = t.Max
				if v.verbosity > veryVerbose {
					min = time.Unix(0, t.Min)
					max = time.Unix(0, t.Max)
				}
				// Fixed: entry details were written to os.Stdout via
				// fmt.Printf, bypassing the configured writer; route them
				// through v.w like every other message.
				fmt.Fprintf(v.w, "key: %q, min: %v, max: %v\n", t.Key, min, max)
			}
			return nil
		})
		if err != nil {
			fmt.Fprintf(v.w, "%q failed to walk tombstone entries: %v. Last okay entry: %d\n", v.f, err, totalEntries)
			failed = true
			continue
		}

		// NOTE(review): start is captured once before the loop, so the
		// reported duration is cumulative across files, not per file —
		// confirm that is intended.
		fmt.Fprintf(v.w, "Completed verification for %q in %v.\nVerified %d entries\n\n", v.f, time.Since(start), totalEntries)
	}

	if failed {
		return errors.New("failed tombstone verification")
	}
	return nil
}

View File

@ -0,0 +1,232 @@
// Package tsm verifies integrity of TSM files.
package tsm
import (
"flag"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"text/tabwriter"
"time"
"unicode/utf8"
"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
"github.com/pkg/errors"
)
// Command represents the program execution for "influx_inspect verify".
type Command struct {
Stderr io.Writer
Stdout io.Writer
}
// NewCommand returns a new instance of Command.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
}
// Run executes the verify program: it walks the data directory under -dir and
// checks either block checksums (the default) or series-key UTF-8 validity
// (-check-utf8), writing a tab-aligned report to Stdout.
func (cmd *Command) Run(args ...string) error {
	var path string
	fs := flag.NewFlagSet("verify", flag.ExitOnError)
	fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]")

	var checkUTF8 bool
	fs.BoolVar(&checkUTF8, "check-utf8", false, "Verify series keys are valid UTF-8")

	fs.SetOutput(cmd.Stdout)
	fs.Usage = cmd.printUsage

	if err := fs.Parse(args); err != nil {
		return err
	}

	dataPath := filepath.Join(path, "data")
	tw := tabwriter.NewWriter(cmd.Stdout, 16, 8, 0, '\t', 0)

	// Select the verification strategy requested on the command line.
	var runner verifier
	if checkUTF8 {
		runner = &verifyUTF8{}
	} else {
		runner = &verifyChecksums{}
	}
	err := runner.Run(tw, dataPath)

	// Fixed: the tabwriter flush error was silently discarded; surface it
	// when verification itself succeeded.
	if ferr := tw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}
// printUsage prints the usage message to the command's Stdout writer (not
// STDERR, as the original comment claimed — see the Fprint below). It is
// installed as the FlagSet's Usage function in Run.
func (cmd *Command) printUsage() {
	usage := fmt.Sprintf(`Verifies the integrity of TSM files.
Usage: influx_inspect verify [flags]
-dir <path>
The root storage path.
Must be changed if you are using a non-default storage directory.
Defaults to "%[1]s/.influxdb".
-check-utf8
Verify series keys are valid UTF-8.
This check skips verification of block checksums.
`, os.Getenv("HOME"))
	fmt.Fprint(cmd.Stdout, usage)
}
// verifyTSM holds the state shared by the TSM verification runners: the queue
// of files to inspect, the current file, timing, and a sticky first error.
type verifyTSM struct {
	files []string  // remaining TSM files to verify
	f     string    // TSM file currently being verified
	start time.Time // set by Start; read by Elapsed
	err   error     // first error encountered while opening/reading files
}
// loadFiles collects every TSM file beneath dataPath into v.files.
func (v *verifyTSM) loadFiles(dataPath string) error {
	tsmExt := "." + tsm1.TSMFileExtension
	walkErr := filepath.Walk(dataPath, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if filepath.Ext(path) == tsmExt {
			v.files = append(v.files, path)
		}
		return nil
	})
	if walkErr != nil {
		return errors.Wrap(walkErr, "could not load storage files (use -dir for custom storage root)")
	}
	return nil
}
// Next advances to the next pending TSM file, reporting whether one was
// available. The current file is exposed through v.f.
func (v *verifyTSM) Next() bool {
	if len(v.files) == 0 {
		return false
	}
	v.f = v.files[0]
	v.files = v.files[1:]
	return true
}
// TSMReader opens the current file and returns its path with a reader over
// it. On failure it records the error in v.err and returns ("", nil);
// callers treat a nil reader as a stop signal.
func (v *verifyTSM) TSMReader() (string, *tsm1.TSMReader) {
	fail := func(err error) (string, *tsm1.TSMReader) {
		v.err = err
		return "", nil
	}

	file, err := os.OpenFile(v.f, os.O_RDONLY, 0600)
	if err != nil {
		return fail(err)
	}

	reader, err := tsm1.NewTSMReader(file)
	if err != nil {
		file.Close()
		return fail(err)
	}
	return v.f, reader
}
// Start records the wall-clock time at which verification began.
func (v *verifyTSM) Start() { v.start = time.Now() }

// Elapsed reports how much time has passed since Start was called.
func (v *verifyTSM) Elapsed() time.Duration { return time.Since(v.start) }
// verifyChecksums verifies that every block in every TSM file matches its
// stored CRC32 checksum. It embeds verifyTSM for file iteration and timing.
type verifyChecksums struct {
	verifyTSM
	totalErrors int // blocks whose checksum did not match, across all files
	total       int // total blocks inspected across all files
}
// Run loads all TSM files under dataPath and verifies the CRC32 checksum of
// every block, reporting broken blocks to w. It returns the first error
// recorded while opening or reading files, if any.
func (v *verifyChecksums) Run(w io.Writer, dataPath string) error {
	if err := v.loadFiles(dataPath); err != nil {
		return err
	}
	v.Start()
	for v.Next() {
		f, reader := v.TSMReader()
		if reader == nil {
			// TSMReader stored the open failure in v.err; stop processing.
			break
		}
		blockItr := reader.BlockIterator()
		fileErrors := 0
		count := 0
		for blockItr.Next() {
			v.total++
			key, _, _, _, checksum, buf, err := blockItr.Read()
			if err != nil {
				v.totalErrors++
				fileErrors++
				fmt.Fprintf(w, "%s: could not get checksum for key %v block %d due to error: %q\n", f, key, count, err)
			} else if expected := crc32.ChecksumIEEE(buf); checksum != expected {
				// Recompute the checksum over the raw block bytes and compare
				// with the value stored alongside the block.
				v.totalErrors++
				fileErrors++
				fmt.Fprintf(w, "%s: got %d but expected %d for key %v, block %d\n", f, checksum, expected, key, count)
			}
			count++
		}
		if fileErrors == 0 {
			fmt.Fprintf(w, "%s: healthy\n", f)
		}
		reader.Close()
	}
	fmt.Fprintf(w, "Broken Blocks: %d / %d, in %vs\n", v.totalErrors, v.total, v.Elapsed().Seconds())
	return v.err
}
// verifyUTF8 verifies that every series key in every TSM file's index is
// valid UTF-8. It embeds verifyTSM for file iteration and timing.
type verifyUTF8 struct {
	verifyTSM
	totalErrors int // keys that were not valid UTF-8, across all files
	total       int // total keys inspected across all files
}
// Run loads all TSM files under dataPath and checks that every series key in
// each file's index is valid UTF-8, reporting invalid keys to w. It returns
// an error if any invalid key is found or a file could not be opened.
func (v *verifyUTF8) Run(w io.Writer, dataPath string) error {
	if err := v.loadFiles(dataPath); err != nil {
		return err
	}
	v.Start()
	for v.Next() {
		f, reader := v.TSMReader()
		if reader == nil {
			// TSMReader stored the open failure in v.err; stop processing.
			break
		}

		n := reader.KeyCount()
		fileErrors := 0
		v.total += n
		for i := 0; i < n; i++ {
			key, _ := reader.KeyAt(i)
			if !utf8.Valid(key) {
				v.totalErrors++
				fileErrors++
				fmt.Fprintf(w, "%s: key #%d is not valid UTF-8\n", f, i)
			}
		}
		if fileErrors == 0 {
			fmt.Fprintf(w, "%s: healthy\n", f)
		}

		// Fixed: the reader (and its underlying file handle) was never
		// closed, leaking one descriptor per TSM file inspected; the
		// checksum verifier already closed its reader.
		reader.Close()
	}
	fmt.Fprintf(w, "Invalid Keys: %d / %d, in %vs\n", v.totalErrors, v.total, v.Elapsed().Seconds())
	if v.totalErrors > 0 && v.err == nil {
		v.err = errors.New("check-utf8: failed")
	}
	return v.err
}
// verifier is the common interface implemented by verifyChecksums and
// verifyUTF8; Run performs a scan rooted at dataPath, writing output to w.
type verifier interface {
	Run(w io.Writer, dataPath string) error
}

View File

@ -0,0 +1,3 @@
package tsm_test
// TODO: write some tests

View File

@ -1,159 +0,0 @@
package generate
import (
"context"
"fmt"
"os"
"time"
"github.com/influxdata/influxdb/v2/bolt"
"github.com/influxdata/influxdb/v2/cmd/influxd/internal/profile"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/kv"
"github.com/influxdata/influxdb/v2/kv/migration"
"github.com/influxdata/influxdb/v2/kv/migration/all"
"github.com/influxdata/influxdb/v2/pkg/data/gen"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
var Command = &cobra.Command{
Use: "generate <schema.toml>",
Short: "Generate time series data sets using TOML schema",
Long: `
This command will generate time series data direct to disk using schema
defined in a TOML file. Use the help-schema subcommand to produce a TOML
file to STDOUT, which includes documentation describing the available options.
NOTES:
* The influxd server should not be running when using the generate tool
as it modifies the index and TSM data.
* This tool is intended for development and testing purposes only and
SHOULD NOT be run on a production server.
`,
Args: cobra.ExactArgs(1),
RunE: generateFE,
}
var flags struct {
printOnly bool
storageSpec StorageSpec
profile profile.Config
}
func init() {
Command.Flags().SortFlags = false
pfs := Command.PersistentFlags()
pfs.SortFlags = false
pfs.BoolVar(&flags.printOnly, "print", false, "Print data spec and exit")
flags.storageSpec.AddFlags(Command, pfs)
pfs.StringVar(&flags.profile.CPU, "cpuprofile", "", "Collect a CPU profile")
pfs.StringVar(&flags.profile.Memory, "memprofile", "", "Collect a memory profile")
}
func generateFE(_ *cobra.Command, args []string) error {
storagePlan, err := flags.storageSpec.Plan()
if err != nil {
return err
}
storagePlan.PrintPlan(os.Stdout)
spec, err := gen.NewSpecFromPath(args[0])
if err != nil {
return err
}
if err = assignOrgBucket(spec); err != nil {
return err
}
if flags.printOnly {
return nil
}
return exec(storagePlan, spec)
}
// assignOrgBucket resolves the organization and bucket named in the storage
// spec flags against the local BoltDB metadata store and copies their IDs
// into spec. Outstanding KV migrations are applied before the lookups.
func assignOrgBucket(spec *gen.Spec) error {
	boltFile, err := fs.BoltFile()
	if err != nil {
		return err
	}

	store := bolt.NewKVStore(zap.NewNop(), boltFile)
	if err = store.Open(context.Background()); err != nil {
		return err
	}
	// Fixed: the store was only closed on the success path, leaking the bolt
	// file handle whenever a migration or lookup below failed.
	defer store.Close()

	s := kv.NewService(zap.NewNop(), store)

	migrator, err := migration.NewMigrator(
		zap.NewNop(),
		store,
		all.Migrations[:]...,
	)
	if err != nil {
		return err
	}

	// Apply migrations to the metadata store so lookups see a current schema.
	if err := migrator.Up(context.Background()); err != nil {
		return err
	}

	org, err := s.FindOrganizationByName(context.Background(), flags.storageSpec.Organization)
	if err != nil {
		return err
	}

	bucket, err := s.FindBucketByName(context.Background(), org.ID, flags.storageSpec.Bucket)
	if err != nil {
		return err
	}

	spec.OrgID = org.ID
	spec.BucketID = bucket.ID
	return nil
}
func exec(storagePlan *StoragePlan, spec *gen.Spec) error {
tr := gen.TimeRange{
Start: storagePlan.StartTime,
End: storagePlan.EndTime,
}
sg := gen.NewSeriesGeneratorFromSpec(spec, tr)
stop := flags.profile.Start()
defer stop()
var files []string
start := time.Now().UTC()
defer func() {
elapsed := time.Since(start)
fmt.Println()
fmt.Println("Generated:")
for _, f := range files {
fmt.Println(f)
}
fmt.Println()
fmt.Printf("Total time: %0.1f seconds\n", elapsed.Seconds())
}()
path, err := fs.InfluxDir()
if err != nil {
return err
}
g := &Generator{Clean: storagePlan.Clean}
files, err = g.Run(context.Background(), path, sg)
return err
}

View File

@ -1,187 +0,0 @@
package generate
import (
"fmt"
"github.com/spf13/cobra"
)
var helpSchemaCommand = &cobra.Command{
Use: "help-schema",
Short: "Print a documented TOML schema to STDOUT",
Run: func(cmd *cobra.Command, args []string) {
fmt.Print(documentedSchema)
},
}
func init() {
Command.AddCommand(helpSchemaCommand)
}
const documentedSchema = `title = "Documented schema"
# limit the maximum number of series generated across all measurements
#
# series-limit: integer, optional (default: unlimited)
[[measurements]]
# name of measurement
#
# NOTE:
# Multiple definitions of the same measurement name are allowed and
# will be merged together.
name = "cpu"
# sample: float; where 0 < sample 1.0 (default: 0.5)
# sample a subset of the tag set
#
# sample 25% of the tags
#
sample = 0.25
# Keys for defining a tag
#
# name: string, required
# Name of field
#
# source: array<string> or object
#
# A literal array of string values defines the tag values.
#
# An object defines more complex generators. The type key determines the
# type of generator.
#
# source types:
#
# type: "sequence"
# generate a sequence of tag values
#
# format: string
# a format string for the values (default: "value%s")
# start: int (default: 0)
# beginning value
# count: int, required
# ending value
#
# type: "file"
# generate a sequence of tag values from a file source.
# The data in the file is sorted, deduplicated and verified is valid UTF-8
#
# path: string
# absolute path or relative path to current toml file
tags = [
# example sequence tag source. The range of values are automatically
# prefixed with 0s
# to ensure correct sort behavior.
{ name = "host", source = { type = "sequence", format = "host-%s", start = 0, count = 5 } },
# tags can also be sourced from a file. The path is relative to the
# schema.toml.
# Each value must be on a new line. The file is also sorted, deduplicated
# and UTF-8 validated.
{ name = "rack", source = { type = "file", path = "files/racks.txt" } },
# Example string array source, which is also deduplicated and sorted
{ name = "region", source = ["us-west-01","us-west-02","us-east"] },
]
# Keys for defining a field
#
# name: string, required
# Name of field
#
# count: int, required
# The maximum number of values to generate. When multiple fields
# have the same count and time-spec, they will share timestamps.
#
# A time-spec can be either time-precision or time-interval, which
# determines how timestamps are generated and may also influence
# the time range and number of values generated.
#
# time-precision: string [ns, us, ms, s, m, h] (default: ms)
# Specifies the precision (rounding) for generated timestamps.
#
# If the precision results in fewer than "count" intervals for the
# given time range the number of values will be reduced.
#
# Example:
# count = 1000, start = 0s, end = 100s, time-precison = s
# 100 values will be generated at [0s, 1s, 2s, ..., 99s]
#
# If the precision results in greater than "count" intervals for the
# given time range, the interval will be rounded to the nearest multiple of
# time-precision.
#
# Example:
# count = 10, start = 0s, end = 100s, time-precison = s
# 100 values will be generated at [0s, 10s, 20s, ..., 90s]
#
# time-interval: Go duration string (eg 90s, 1h30m)
# Specifies the delta between generated timestamps.
#
# If the delta results in fewer than "count" intervals for the
# given time range the number of values will be reduced.
#
# Example:
# count = 100, start = 0s, end = 100s, time-interval = 10s
# 10 values will be generated at [0s, 10s, 20s, ..., 90s]
#
# If the delta results in greater than "count" intervals for the
# given time range, the start-time will be adjusted to ensure "count" values.
#
# Example:
# count = 20, start = 0s, end = 1000s, time-interval = 10s
# 20 values will be generated at [800s, 810s, ..., 900s, ..., 990s]
#
# source: int, float, boolean, string, array or object
#
# A literal int, float, boolean or string will produce
# a constant value of the same data type.
#
# A literal array of homogeneous values will generate a repeating
# sequence.
#
# An object defines more complex generators. The type key determines the
# type of generator.
#
# source types:
#
# type: "rand<float>"
# generate random float values
# seed: seed to random number generator (default: 0)
# min: minimum value (default: 0.0)
# max: maximum value (default: 1.0)
#
# type: "zipf<integer>"
# generate random integer values using a Zipf distribution
# The generator generates values k [0, imax] such that P(k)
# is proportional to (v + k) ** (-s). Requirements: s > 1 and v 1.
# See https://golang.org/pkg/math/rand/#NewZipf for more information.
#
# seed: seed to random number generator (default: 0)
# s: float > 1 (required)
# v: float 1 (required)
# imax: integer (required)
#
fields = [
# Example constant float
{ name = "system", count = 5000, source = 2.5 },
# Example random floats
{ name = "user", count = 5000, source = { type = "rand<float>", seed = 10, min = 0.0, max = 1.0 } },
]
# Multiple measurements may be defined.
[[measurements]]
name = "mem"
tags = [
{ name = "host", source = { type = "sequence", format = "host-%s", start = 0, count = 5 } },
{ name = "region", source = ["us-west-01","us-west-02","us-east"] },
]
fields = [
# An example of a sequence of integer values
{ name = "free", count = 100, source = [10,15,20,25,30,35,30], time-precision = "ms" },
{ name = "low_mem", count = 100, source = [false,true,true], time-precision = "ms" },
]
`

View File

@ -1,85 +0,0 @@
package generate
import (
"os"
"strings"
"text/template"
"github.com/influxdata/influxdb/v2/pkg/data/gen"
"github.com/spf13/cobra"
)
var simpleCommand = &cobra.Command{
Use: "simple",
Short: "Generate simple data sets using only CLI flags",
RunE: simpleGenerateFE,
}
var simpleFlags struct {
schemaSpec SchemaSpec
}
func init() {
simpleCommand.PersistentFlags().SortFlags = false
simpleCommand.Flags().SortFlags = false
simpleFlags.schemaSpec.AddFlags(simpleCommand, simpleCommand.Flags())
Command.AddCommand(simpleCommand)
}
func simpleGenerateFE(_ *cobra.Command, _ []string) error {
storagePlan, err := flags.storageSpec.Plan()
if err != nil {
return err
}
storagePlan.PrintPlan(os.Stdout)
schemaPlan, err := simpleFlags.schemaSpec.Plan(storagePlan)
if err != nil {
return err
}
schemaPlan.PrintPlan(os.Stdout)
spec := planToSpec(schemaPlan)
if err = assignOrgBucket(spec); err != nil {
return err
}
if flags.printOnly {
return nil
}
return exec(storagePlan, spec)
}
var (
tomlSchema = template.Must(template.New("schema").Parse(`
title = "CLI schema"
[[measurements]]
name = "m0"
sample = 1.0
tags = [
{{- range $i, $e := .Tags }}
{ name = "tag{{$i}}", source = { type = "sequence", format = "value%s", start = 0, count = {{$e}} } },{{ end }}
]
fields = [
{ name = "v0", count = {{ .PointsPerSeries }}, source = 1.0 },
]`))
)
func planToSpec(p *SchemaPlan) *gen.Spec {
var sb strings.Builder
if err := tomlSchema.Execute(&sb, p); err != nil {
panic(err)
}
spec, err := gen.NewSpecFromToml(sb.String())
if err != nil {
panic(err)
}
return spec
}

View File

@ -1,162 +0,0 @@
package generate
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/influxdata/influxdb/v2/cmd/influxd/generate/internal/shard"
"github.com/influxdata/influxdb/v2/kit/errors"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/pkg/data/gen"
"github.com/influxdata/influxdb/v2/pkg/limiter"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/tsdb"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"github.com/influxdata/influxdb/v2/tsdb/tsi1"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
)
type Generator struct {
sfile *seriesfile.SeriesFile
// Clean specifies whether to clean any of the data related files
Clean CleanLevel
}
func (g *Generator) Run(ctx context.Context, path string, gen gen.SeriesGenerator) ([]string, error) {
path = filepath.Join(path, "engine")
config := storage.NewConfig()
switch g.Clean {
case CleanLevelTSM:
if err := os.RemoveAll(path); err != nil {
return nil, err
}
case CleanLevelAll:
if err := os.RemoveAll(path); err != nil {
return nil, err
}
}
g.sfile = seriesfile.NewSeriesFile(config.GetSeriesFilePath(path))
if err := g.sfile.Open(ctx); err != nil {
return nil, err
}
defer g.sfile.Close()
g.sfile.DisableCompactions()
ti := tsi1.NewIndex(g.sfile, config.Index, tsi1.WithPath(config.GetIndexPath(path)))
if err := ti.Open(ctx); err != nil {
return nil, fmt.Errorf("error opening TSI1 index: %s", err.Error())
}
files, err := g.writeShard(ti, gen, config.GetEnginePath(path))
if err != nil {
return nil, fmt.Errorf("error writing data: %s", err.Error())
}
ti.Compact()
ti.Wait()
if err := ti.Close(); err != nil {
return nil, fmt.Errorf("error compacting TSI1 index: %s", err.Error())
}
var (
wg sync.WaitGroup
errs errors.List
)
parts := g.sfile.Partitions()
wg.Add(len(parts))
ch := make(chan error, len(parts))
limit := limiter.NewFixed(runtime.NumCPU())
for i := range parts {
go func(n int) {
limit.Take()
defer func() {
wg.Done()
limit.Release()
}()
p := parts[n]
c := seriesfile.NewSeriesPartitionCompactor()
if _, err := c.Compact(p); err != nil {
ch <- fmt.Errorf("error compacting series partition %d: %s", n, err.Error())
}
}(i)
}
wg.Wait()
close(ch)
for e := range ch {
errs.Append(e)
}
if err := errs.Err(); err != nil {
return nil, err
}
return files, nil
}
// seriesBatchSize specifies the number of series keys passed to the index.
const seriesBatchSize = 1000
func (g *Generator) writeShard(idx *tsi1.Index, sg gen.SeriesGenerator, path string) ([]string, error) {
if err := os.MkdirAll(path, 0777); err != nil {
return nil, err
}
sw, err := shard.NewWriter(path, shard.AutoNumber())
if err != nil {
return nil, err
}
defer sw.Close()
coll := &tsdb.SeriesCollection{
Keys: make([][]byte, 0, seriesBatchSize),
Names: make([][]byte, 0, seriesBatchSize),
Tags: make([]models.Tags, 0, seriesBatchSize),
Types: make([]models.FieldType, 0, seriesBatchSize),
}
for sg.Next() {
seriesKey := sg.Key()
coll.Keys = append(coll.Keys, seriesKey)
coll.Names = append(coll.Names, sg.ID())
coll.Tags = append(coll.Tags, sg.Tags())
coll.Types = append(coll.Types, sg.FieldType())
if coll.Length() == seriesBatchSize {
if err := idx.CreateSeriesListIfNotExists(coll); err != nil {
return nil, err
}
coll.Truncate(0)
}
vg := sg.TimeValuesGenerator()
key := tsm1.SeriesFieldKeyBytes(string(seriesKey), string(sg.Field()))
for vg.Next() {
sw.WriteV(key, vg.Values())
}
if err := sw.Err(); err != nil {
return nil, err
}
}
if coll.Length() > 0 {
if err := idx.CreateSeriesListIfNotExists(coll); err != nil {
return nil, err
}
}
return sw.Files(), nil
}

View File

@ -1,191 +0,0 @@
package shard
import (
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb/v2/pkg/data/gen"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
)
const (
	// maxTSMFileSize is the rollover threshold: once the current TSM file
	// grows beyond this size, a new file is started.
	maxTSMFileSize = uint32(2048 * 1024 * 1024) // 2GB
)

// Writer writes TSM files into a shard directory, rolling over to a new
// generation/sequence-numbered file whenever the current one fills up.
type Writer struct {
	tw       tsm1.TSMWriter // writer for the TSM file currently open
	path     string         // shard directory the files are written into
	ext      string         // file extension, possibly with a ".tmp" suffix
	files    []string       // full paths of every file created so far
	gen, seq int            // generation and sequence numbers used for naming
	err      error          // first error encountered; sticky
	buf      []byte         // scratch buffer reused across WriteV encodes
	auto     bool           // derive the next generation from existing files
}
// option configures a Writer during construction (functional options).
type option func(w *Writer)

// Generation specifies the generation number of the tsm files.
func Generation(gen int) option {
	return func(w *Writer) {
		w.gen = gen
	}
}

// Sequence specifies the starting sequence number of the tsm files.
func Sequence(seq int) option {
	return func(w *Writer) {
		w.seq = seq
	}
}

// Temporary configures the writer to create tsm.tmp files.
func Temporary() option {
	return func(w *Writer) {
		w.ext = tsm1.TSMFileExtension + "." + tsm1.TmpTSMFileExtension
	}
}

// AutoNumber will read the existing TSM file names and use generation + 1.
func AutoNumber() option {
	return func(w *Writer) {
		w.auto = true
	}
}
// NewWriter constructs a Writer rooted at path and opens its first TSM file.
// Defaults are generation 1, sequence 1, and the standard TSM extension;
// opts may override any of these before the first file is created.
func NewWriter(path string, opts ...option) (*Writer, error) {
	w := &Writer{path: path, gen: 1, seq: 1, ext: tsm1.TSMFileExtension}
	for _, opt := range opts {
		opt(w)
	}

	if w.auto {
		if err := w.readExisting(); err != nil {
			return nil, err
		}
	}

	if w.nextTSM(); w.err != nil {
		return nil, w.err
	}
	return w, nil
}
// Write appends values for key to the current TSM file, rolling over to a
// new file when the current one exceeds maxTSMFileSize or runs out of block
// capacity. Errors are sticky: once w.err is set, Write is a no-op.
func (w *Writer) Write(key []byte, values tsm1.Values) {
	if w.err != nil {
		return
	}
	if w.tw.Size() > maxTSMFileSize {
		w.closeTSM()
		w.nextTSM()
	}
	if err := w.tw.Write(key, values); err != nil {
		if err == tsm1.ErrMaxBlocksExceeded {
			// NOTE(review): on ErrMaxBlocksExceeded the writer rolls to a new
			// file but the values from this call are not re-written — they
			// appear to be dropped. Confirm whether callers retry.
			w.closeTSM()
			w.nextTSM()
		} else {
			w.err = err
		}
	}
}
// WriteV encodes values into w.buf and appends the resulting block for key to
// the current TSM file, rolling over when the file exceeds maxTSMFileSize.
// Errors are sticky: once w.err is set, WriteV is a no-op.
func (w *Writer) WriteV(key []byte, values gen.Values) {
	if w.err != nil {
		return
	}
	if w.tw.Size() > maxTSMFileSize {
		w.closeTSM()
		w.nextTSM()
	}
	minT, maxT := values.MinTime(), values.MaxTime()
	var err error
	// Reuse w.buf across calls to avoid re-allocating the encode buffer.
	if w.buf, err = values.Encode(w.buf); err != nil {
		w.err = err
		return
	}
	if err := w.tw.WriteBlock(key, minT, maxT, w.buf); err != nil {
		if err == tsm1.ErrMaxBlocksExceeded {
			// NOTE(review): as in Write, the encoded block is not re-written
			// after rollover — confirm this drop is intended.
			w.closeTSM()
			w.nextTSM()
		} else {
			w.err = err
		}
	}
}
// Close finalizes and closes the current TSM file, if one is open.
func (w *Writer) Close() {
	if w.tw == nil {
		return
	}
	w.closeTSM()
}

// Err returns the first error encountered by the writer, if any.
func (w *Writer) Err() error { return w.err }

// Files returns the full paths of all the files written by the Writer.
func (w *Writer) Files() []string { return w.files }
// nextTSM opens the next TSM file in the generation/sequence order and
// prepares a writer for it. On any failure it records the error in w.err;
// only a successfully opened file is recorded in w.files.
func (w *Writer) nextTSM() {
	fileName := filepath.Join(w.path, fmt.Sprintf("%s.%s", tsm1.DefaultFormatFileName(w.gen, w.seq), w.ext))

	// Fixed: the original appended fileName to w.files and incremented the
	// sequence before verifying the file didn't already exist, so a failed
	// (never-written) file was still reported through Files().
	if fi, _ := os.Stat(fileName); fi != nil {
		w.err = fmt.Errorf("attempted to overwrite an existing TSM file: %q", fileName)
		return
	}

	fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		w.err = err
		return
	}

	// Create the writer for the new TSM file.
	w.tw, err = tsm1.NewTSMWriter(fd)
	if err != nil {
		w.err = err
		return
	}

	w.files = append(w.files, fileName)
	w.seq++
}
// closeTSM writes the index for the open TSM file and closes it, recording
// the first error encountered. ErrNoValues from WriteIndex is tolerated so
// an empty file can still be finalized.
func (w *Writer) closeTSM() {
	werr := w.tw.WriteIndex()
	if werr != nil && werr != tsm1.ErrNoValues {
		w.err = werr
	}
	if cerr := w.tw.Close(); cerr != nil && w.err == nil {
		w.err = cerr
	}
	w.tw = nil
}
// readExisting scans w.path for existing TSM files and advances w.gen past
// the highest generation found, so newly written files never collide.
func (w *Writer) readExisting() error {
	pattern := filepath.Join(w.path, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))
	files, err := filepath.Glob(pattern)
	if err != nil {
		return err
	}

	for _, f := range files {
		generation, _, perr := tsm1.DefaultParseFileName(f)
		if perr != nil {
			return perr
		}
		if generation >= w.gen {
			w.gen = generation + 1
		}
	}
	return nil
}

View File

@ -1,60 +0,0 @@
package generate
import (
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
)
// StoragePlan describes where generated data will be written: the target
// organization and bucket, the covered time range, and the clean level.
type StoragePlan struct {
	Organization string
	Bucket       string
	StartTime    time.Time
	EndTime      time.Time
	Clean        CleanLevel
	Path         string
}

// String renders the plan using the same layout as PrintPlan.
func (p *StoragePlan) String() string {
	var sb strings.Builder
	p.PrintPlan(&sb)
	return sb.String()
}

// PrintPlan writes a human-readable, column-aligned summary of the plan to w.
func (p *StoragePlan) PrintPlan(w io.Writer) {
	tw := tabwriter.NewWriter(w, 25, 4, 2, ' ', 0)
	rows := []struct {
		label string
		value interface{}
	}{
		{"Organization", p.Organization},
		{"Bucket", p.Bucket},
		{"Start time", p.StartTime},
		{"End time", p.EndTime},
		{"Clean data", p.Clean},
	}
	for _, r := range rows {
		fmt.Fprintf(tw, "%s\t%v\n", r.label, r.value)
	}
	tw.Flush()
}

// TimeSpan returns the total duration covered by the plan's time range.
func (p *StoragePlan) TimeSpan() time.Duration {
	return p.EndTime.Sub(p.StartTime)
}
// SchemaPlan describes the generated schema: the tag cardinalities and the
// number of points per series, alongside the storage plan it belongs to.
type SchemaPlan struct {
	StoragePlan     *StoragePlan
	Tags            TagCardinalities
	PointsPerSeries int
}

// String renders the plan using the same layout as PrintPlan.
func (p *SchemaPlan) String() string {
	var sb strings.Builder
	p.PrintPlan(&sb)
	return sb.String()
}

// PrintPlan writes a human-readable, column-aligned summary of the plan to w.
func (p *SchemaPlan) PrintPlan(w io.Writer) {
	tw := tabwriter.NewWriter(w, 25, 4, 2, ' ', 0)
	cardinality := p.Tags.Cardinality()
	fmt.Fprintf(tw, "Tag cardinalities\t%s\n", p.Tags)
	fmt.Fprintf(tw, "Points per series\t%d\n", p.PointsPerSeries)
	fmt.Fprintf(tw, "Total points\t%d\n", cardinality*p.PointsPerSeries)
	fmt.Fprintf(tw, "Total series\t%d\n", cardinality)
	_ = tw.Flush()
}

View File

@ -1,153 +0,0 @@
package generate
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// CleanLevel controls which data files the generator removes before writing.
type CleanLevel int

const (
	// CleanLevelNone will not remove any data files.
	CleanLevelNone CleanLevel = iota
	// CleanLevelTSM will only remove TSM data files.
	CleanLevelTSM
	// CleanLevelAll will remove all TSM and index data files.
	CleanLevelAll
)

// String returns the flag spelling of the level ("none", "tsm" or "all"),
// or the decimal value for anything out of range.
func (i CleanLevel) String() string {
	switch i {
	case CleanLevelNone:
		return "none"
	case CleanLevelTSM:
		return "tsm"
	case CleanLevelAll:
		return "all"
	}
	return strconv.FormatInt(int64(i), 10)
}

// Set parses v into the receiver; it implements the flag Value interface.
func (i *CleanLevel) Set(v string) error {
	switch v {
	case "none":
		*i = CleanLevelNone
	case "tsm":
		*i = CleanLevelTSM
	case "all":
		*i = CleanLevelAll
	default:
		return fmt.Errorf("invalid clean level %q, must be none, tsm or all", v)
	}
	return nil
}

// Type names the flag value type; it implements the pflag Value interface.
func (i CleanLevel) Type() string {
	return "clean-level"
}
type StorageSpec struct {
Organization string
Bucket string
StartTime string
EndTime string
Clean CleanLevel
}
func (a *StorageSpec) AddFlags(cmd *cobra.Command, fs *pflag.FlagSet) {
fs.StringVar(&a.Organization, "org", "", "Name of organization")
cmd.MarkFlagRequired("org")
fs.StringVar(&a.Bucket, "bucket", "", "Name of bucket")
cmd.MarkFlagRequired("bucket")
start := time.Now().UTC().AddDate(0, 0, -7).Truncate(24 * time.Hour)
fs.StringVar(&a.StartTime, "start-time", start.Format(time.RFC3339), "Start time")
fs.StringVar(&a.EndTime, "end-time", start.AddDate(0, 0, 7).Format(time.RFC3339), "End time")
fs.Var(&a.Clean, "clean", "Clean time series data files (none, tsm or all)")
}
func (a *StorageSpec) Plan() (*StoragePlan, error) {
plan := &StoragePlan{
Organization: a.Organization,
Bucket: a.Bucket,
Clean: a.Clean,
}
if a.StartTime != "" {
if t, err := time.Parse(time.RFC3339, a.StartTime); err != nil {
return nil, err
} else {
plan.StartTime = t.UTC()
}
}
if a.EndTime != "" {
if t, err := time.Parse(time.RFC3339, a.EndTime); err != nil {
return nil, err
} else {
plan.EndTime = t.UTC()
}
}
return plan, nil
}
// TagCardinalities is a flag value holding the cardinality of each tag level.
type TagCardinalities []int

// String renders the cardinalities as "[a,b,c]".
func (t TagCardinalities) String() string {
	parts := make([]string, len(t))
	for i, v := range t {
		parts[i] = strconv.Itoa(v)
	}
	return "[" + strings.Join(parts, ",") + "]"
}

// Cardinality returns the total series cardinality: the product of all
// tag-level cardinalities (1 for an empty set).
func (t TagCardinalities) Cardinality() int {
	n := 1
	for _, v := range t {
		n *= v
	}
	return n
}

// Set parses a comma-separated list of integers into the receiver; it
// implements the flag Value interface.
func (t *TagCardinalities) Set(tags string) error {
	*t = (*t)[:0]
	for _, s := range strings.Split(tags, ",") {
		v, err := strconv.Atoi(s)
		if err != nil {
			return fmt.Errorf("cannot parse tag cardinality: %s", s)
		}
		*t = append(*t, v)
	}
	return nil
}

// Type names the flag value type; it implements the pflag Value interface.
func (t *TagCardinalities) Type() string {
	return "tags"
}
type SchemaSpec struct {
Tags TagCardinalities
PointsPerSeries int
}
func (s *SchemaSpec) AddFlags(cmd *cobra.Command, fs *pflag.FlagSet) {
s.Tags = []int{10, 10, 10}
fs.Var(&s.Tags, "t", "Tag cardinality")
fs.IntVar(&s.PointsPerSeries, "p", 100, "Points per series")
}
func (s *SchemaSpec) Plan(sp *StoragePlan) (*SchemaPlan, error) {
return &SchemaPlan{
StoragePlan: sp,
Tags: s.Tags,
PointsPerSeries: s.PointsPerSeries,
}, nil
}

View File

@ -1,135 +0,0 @@
package inspect
import (
"context"
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/influxdata/influxdb/v2/cmd/influx_inspect/buildtsi"
"github.com/influxdata/influxdb/v2/logger"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"github.com/influxdata/influxdb/v2/tsdb/tsi1"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"github.com/spf13/cobra"
)
const defaultBatchSize = 10000
var buildTSIFlags = struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
// Data path options
DataPath string // optional. Defaults to <engine_path>/engine/data
WALPath string // optional. Defaults to <engine_path>/engine/wal
SeriesFilePath string // optional. Defaults to <engine_path>/engine/_series
IndexPath string // optional. Defaults to <engine_path>/engine/index
BatchSize int // optional. Defaults to 10000
MaxLogFileSize int64 // optional. Defaults to tsi1.DefaultMaxIndexLogFileSize
MaxCacheSize uint64 // optional. Defaults to tsm1.DefaultCacheMaxMemorySize
Concurrency int // optional. Defaults to GOMAXPROCS(0)
Verbose bool // optional. Defaults to false.
}{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
// NewBuildTSICommand returns a new instance of Command with default setting applied.
func NewBuildTSICommand() *cobra.Command {
cmd := &cobra.Command{
Use: "build-tsi",
Short: "Rebuilds the TSI index and (where necessary) the Series File.",
Long: `This command will rebuild the TSI index and if needed the Series
File.
The index is built by reading all of the TSM indexes in the TSM data
directory, and all of the WAL entries in the WAL data directory. If the
Series File directory is missing, then the series file will be rebuilt.
If the TSI index directory already exists, then this tool will fail.
Performance of the tool can be tweaked by adjusting the max log file size,
max cache file size and the batch size.
max-log-file-size determines how big in-memory parts of the index have to
get before they're compacted into memory-mappable index files.
Consider decreasing this from the default if you find the heap
requirements of your TSI index are too much.
max-cache-size refers to the maximum cache size allowed. If there are WAL
files to index, then they need to be replayed into a tsm1.Cache first
by this tool. If the maximum cache size isn't large enough then there
will be an error and this tool will fail. Increase max-cache-size to
address this.
batch-size refers to the size of the batches written into the index.
Increasing this can improve performance but can result in much more
memory usage.
`,
RunE: RunBuildTSI,
}
defaultPath := filepath.Join(os.Getenv("HOME"), "/.influxdbv2/engine/")
defaultDataPath := filepath.Join(defaultPath, storage.DefaultEngineDirectoryName)
defaultWALPath := filepath.Join(defaultPath, storage.DefaultWALDirectoryName)
defaultSFilePath := filepath.Join(defaultPath, storage.DefaultSeriesFileDirectoryName)
defaultIndexPath := filepath.Join(defaultPath, storage.DefaultIndexDirectoryName)
cmd.Flags().StringVar(&buildTSIFlags.DataPath, "tsm-path", defaultDataPath, "Path to the TSM data directory. Defaults to "+defaultDataPath)
cmd.Flags().StringVar(&buildTSIFlags.WALPath, "wal-path", defaultWALPath, "Path to the WAL data directory. Defaults to "+defaultWALPath)
cmd.Flags().StringVar(&buildTSIFlags.SeriesFilePath, "sfile-path", defaultSFilePath, "Path to the Series File directory. Defaults to "+defaultSFilePath)
cmd.Flags().StringVar(&buildTSIFlags.IndexPath, "tsi-path", defaultIndexPath, "Path to the TSI index directory. Defaults to "+defaultIndexPath)
cmd.Flags().IntVar(&buildTSIFlags.Concurrency, "concurrency", runtime.GOMAXPROCS(0), "Number of workers to dedicate to shard index building. Defaults to GOMAXPROCS")
cmd.Flags().Int64Var(&buildTSIFlags.MaxLogFileSize, "max-log-file-size", tsi1.DefaultMaxIndexLogFileSize, "optional: maximum log file size")
cmd.Flags().Uint64Var(&buildTSIFlags.MaxCacheSize, "max-cache-size", uint64(tsm1.DefaultCacheMaxMemorySize), "optional: maximum cache size")
cmd.Flags().IntVar(&buildTSIFlags.BatchSize, "batch-size", defaultBatchSize, "optional: set the size of the batches we write to the index. Setting this can have adverse affects on performance and heap requirements")
cmd.Flags().BoolVar(&buildTSIFlags.Verbose, "v", false, "verbose")
cmd.SetOutput(buildTSIFlags.Stdout)
return cmd
}
// RunBuildTSI executes the run command for BuildTSI.
// It rebuilds the TSI index from the TSM/WAL data under the paths configured
// in buildTSIFlags, after confirming interactively when running as root.
func RunBuildTSI(cmd *cobra.Command, args []string) error {
// Verify the user actually wants to run as root.
if isRoot() {
fmt.Fprintln(buildTSIFlags.Stdout, "You are currently running as root. This will build your")
fmt.Fprintln(buildTSIFlags.Stdout, "index files with root ownership and will be inaccessible")
fmt.Fprintln(buildTSIFlags.Stdout, "if you run influxd as a non-root user. You should run")
fmt.Fprintln(buildTSIFlags.Stdout, "influxd inspect buildtsi as the same user you are running influxd.")
fmt.Fprint(buildTSIFlags.Stdout, "Are you sure you want to continue? (y/N): ")
var answer string
// Scanln's error is deliberately ignored: an empty or failed read leaves
// answer blank, which fails the "y" prefix test and aborts.
if fmt.Scanln(&answer); !strings.HasPrefix(strings.TrimSpace(strings.ToLower(answer)), "y") {
return fmt.Errorf("operation aborted")
}
}
log := logger.New(buildTSIFlags.Stdout)
// The series file must be open before the index can be rebuilt against it.
sfile := seriesfile.NewSeriesFile(buildTSIFlags.SeriesFilePath)
sfile.Logger = log
if err := sfile.Open(context.Background()); err != nil {
return err
}
// NOTE(review): Close error is discarded by the defer.
defer sfile.Close()
return buildtsi.IndexShard(sfile, buildTSIFlags.IndexPath, buildTSIFlags.DataPath, buildTSIFlags.WALPath,
buildTSIFlags.MaxLogFileSize, buildTSIFlags.MaxCacheSize, buildTSIFlags.BatchSize,
log, buildTSIFlags.Verbose)
}
// isRoot reports whether the process is running as the root user. A failed
// lookup is treated as "not root".
func isRoot() bool {
	u, _ := user.Current()
	if u == nil {
		return false
	}
	return u.Username == "root"
}

View File

@ -1,192 +0,0 @@
package inspect
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
// compactSeriesFileFlags holds the parsed command-line options for the
// "influxd inspect compact-series-file" command.
var compactSeriesFileFlags = struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
// Data path options
SeriesFilePath string // optional. Defaults to <engine_path>/engine/_series
IndexPath string // optional. Defaults to <engine_path>/engine/index
Concurrency int // optional. Defaults to GOMAXPROCS(0)
}{
Stderr: os.Stderr,
Stdout: os.Stdout,
}
// NewCompactSeriesFileCommand returns a new instance of Command with default setting applied.
func NewCompactSeriesFileCommand() *cobra.Command {
	compactCmd := &cobra.Command{
		Use:   "compact-series-file",
		Short: "Compacts the series file to removed deleted series.",
		Long:  `This command will compact the series file by removing deleted series.`,
		RunE:  RunCompactSeriesFile,
	}

	// Default paths are derived from the influx data directory.
	influxDir, _ := fs.InfluxDir()
	enginePath := filepath.Join(influxDir, "engine")
	seriesDir := filepath.Join(enginePath, storage.DefaultSeriesFileDirectoryName)
	indexDir := filepath.Join(enginePath, storage.DefaultIndexDirectoryName)

	flagSet := compactCmd.Flags()
	flagSet.StringVar(&compactSeriesFileFlags.SeriesFilePath, "sfile-path", seriesDir, "Path to the Series File directory. Defaults to "+seriesDir)
	flagSet.StringVar(&compactSeriesFileFlags.IndexPath, "tsi-path", indexDir, "Path to the TSI index directory. Defaults to "+indexDir)
	flagSet.IntVar(&compactSeriesFileFlags.Concurrency, "concurrency", runtime.GOMAXPROCS(0), "Number of workers to dedicate to compaction. Defaults to GOMAXPROCS. Max 8.")

	compactCmd.SetOutput(compactSeriesFileFlags.Stdout)
	return compactCmd
}
// RunCompactSeriesFile executes the run command for CompactSeriesFile.
// It compacts every partition of the series file concurrently, then rebuilds
// the per-partition indexes.
func RunCompactSeriesFile(cmd *cobra.Command, args []string) error {
// Verify the user actually wants to run as root.
if isRoot() {
fmt.Fprintln(compactSeriesFileFlags.Stdout, "You are currently running as root. This will compact your")
fmt.Fprintln(compactSeriesFileFlags.Stdout, "series file with root ownership and will be inaccessible")
fmt.Fprintln(compactSeriesFileFlags.Stdout, "if you run influxd as a non-root user. You should run")
fmt.Fprintln(compactSeriesFileFlags.Stdout, "influxd inspect compact-series-file as the same user you are running influxd.")
fmt.Fprint(compactSeriesFileFlags.Stdout, "Are you sure you want to continue? (y/N): ")
var answer string
// Scanln's error is deliberately ignored: a blank answer aborts below.
if fmt.Scanln(&answer); !strings.HasPrefix(strings.TrimSpace(strings.ToLower(answer)), "y") {
return fmt.Errorf("operation aborted")
}
}
paths, err := seriesFilePartitionPaths(compactSeriesFileFlags.SeriesFilePath)
if err != nil {
return err
}
// Build input channel. The buffer holds every path, so the channel can be
// filled and closed before any worker starts.
pathCh := make(chan string, len(paths))
for _, path := range paths {
pathCh <- path
}
close(pathCh)
// Limit maximum concurrency to the total number of series file partitions.
concurrency := compactSeriesFileFlags.Concurrency
if concurrency > seriesfile.SeriesFilePartitionN {
concurrency = seriesfile.SeriesFilePartitionN
}
// Concurrently process each partition in the series file
var g errgroup.Group
for i := 0; i < concurrency; i++ {
g.Go(func() error {
for path := range pathCh {
if err := compactSeriesFilePartition(path); err != nil {
return err
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
// Build new series file indexes
// NOTE(review): sfile is not closed after this rebuild pass — acceptable
// for a short-lived CLI process, but worth confirming.
sfile := seriesfile.NewSeriesFile(compactSeriesFileFlags.SeriesFilePath)
if err = sfile.Open(context.Background()); err != nil {
return err
}
compactor := seriesfile.NewSeriesPartitionCompactor()
for _, partition := range sfile.Partitions() {
duration, err := compactor.Compact(partition)
if err != nil {
return err
}
fmt.Fprintf(compactSeriesFileFlags.Stdout, "compacted %s in %s\n", partition.Path(), duration)
}
return nil
}
// compactSeriesFilePartition compacts a single series-file partition in place:
// each segment is compacted to a ".tmp" sibling, the partition is closed, the
// temp files are renamed over the originals, and the index file is removed so
// it is rebuilt on the next open. The statement order here is load-bearing.
func compactSeriesFilePartition(path string) error {
const tmpExt = ".tmp"
fmt.Fprintf(compactSeriesFileFlags.Stdout, "processing partition for %q\n", path)
// Open partition so index can recover from entries not in the snapshot.
partitionID, err := strconv.Atoi(filepath.Base(path))
if err != nil {
return fmt.Errorf("cannot parse partition id from path: %s", path)
}
p := seriesfile.NewSeriesPartition(partitionID, path)
if err := p.Open(); err != nil {
return fmt.Errorf("cannot open partition: path=%s err=%s", path, err)
}
// Backstop close; the explicit Close below runs first on the happy path.
defer p.Close()
// Loop over segments and compact.
indexPath := p.IndexPath()
var segmentPaths []string
for _, segment := range p.Segments() {
fmt.Fprintf(compactSeriesFileFlags.Stdout, "processing segment %q %d\n", segment.Path(), segment.ID())
if err := segment.CompactToPath(segment.Path()+tmpExt, p.Index()); err != nil {
return err
}
segmentPaths = append(segmentPaths, segment.Path())
}
// Close partition.
if err := p.Close(); err != nil {
return err
}
// Remove the old segment files and replace with new ones.
for _, dst := range segmentPaths {
src := dst + tmpExt
fmt.Fprintf(compactSeriesFileFlags.Stdout, "renaming new segment %q to %q\n", src, dst)
if err = os.Rename(src, dst); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("serious failure. Please rebuild index and series file: %v", err)
}
}
// Remove index file so it will be rebuilt when reopened.
fmt.Fprintln(compactSeriesFileFlags.Stdout, "removing index file", indexPath)
if err = os.Remove(indexPath); err != nil && !os.IsNotExist(err) { // index won't exist for low cardinality
return err
}
return nil
}
// seriesFilePartitionPaths returns the paths to each partition in the series file.
func seriesFilePartitionPaths(path string) ([]string, error) {
	sf := seriesfile.NewSeriesFile(path)
	if err := sf.Open(context.Background()); err != nil {
		return nil, err
	}

	// Collect the partition paths before closing the file again.
	var partitionPaths []string
	for _, part := range sf.Partitions() {
		partitionPaths = append(partitionPaths, part.Path())
	}

	if err := sf.Close(); err != nil {
		return nil, err
	}
	return partitionPaths, nil
}

View File

@ -1,141 +0,0 @@
// inspects low-level details about tsi1 files.
package inspect
import (
"errors"
"io"
"path/filepath"
"regexp"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/tsdb/tsi1"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
// Raw regular-expression strings captured from the dump-tsi filter flags.
// They are compiled into dumpTSIFlags.*Filter before the dump runs.
var measurementFilter, tagKeyFilter, tagValueFilter string

// dumpTSIFlags holds the parsed options for "influxd inspect dump-tsi".
var dumpTSIFlags = struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
seriesFilePath string // path to the "_series" directory
dataPath string // path to the TSI "index" directory
showSeries bool
showMeasurements bool
showTagKeys bool
showTagValues bool
showTagValueSeries bool
// Compiled forms of the filter flags above; nil when a flag is unset.
measurementFilter *regexp.Regexp
tagKeyFilter *regexp.Regexp
tagValueFilter *regexp.Regexp
}{}
// NewDumpTSICommand returns the "dump-tsi" sub-command, which prints
// low-level details about TSI index and series files.
func NewDumpTSICommand() *cobra.Command {
cmd := &cobra.Command{
Use: "dump-tsi",
Short: "Dump low level tsi information",
Long: `Dumps low-level details about tsi1 files.
Usage: influx_inspect dumptsi [flags] path...
-series
Dump raw series data
-measurements
Dump raw measurement data
-tag-keys
Dump raw tag keys
-tag-values
Dump raw tag values
-tag-value-series
Dump raw series for each tag value
-measurement-filter REGEXP
Filters data by measurement regular expression
-series-path PATH
Path to the "_series" directory under the database data directory.
-index-path PATH
Path to the "index" directory under the database data directory.
-tag-key-filter REGEXP
Filters data by tag key regular expression
-tag-value-filter REGEXP
Filters data by tag value regular expression
`,
RunE: dumpTsi,
}
// Default paths point at the local influx data directory.
defaultDataDir, _ := fs.InfluxDir()
defaultDataDir = filepath.Join(defaultDataDir, "engine")
defaultIndexDir := filepath.Join(defaultDataDir, "index")
defaultSeriesDir := filepath.Join(defaultDataDir, "_series")
cmd.Flags().StringVar(&dumpTSIFlags.seriesFilePath, "series-path", defaultSeriesDir, "Path to series file")
cmd.Flags().StringVar(&dumpTSIFlags.dataPath, "index-path", defaultIndexDir, "Path to the index directory of the data engine")
cmd.Flags().BoolVar(&dumpTSIFlags.showSeries, "series", false, "Show raw series data")
cmd.Flags().BoolVar(&dumpTSIFlags.showMeasurements, "measurements", false, "Show raw measurement data")
cmd.Flags().BoolVar(&dumpTSIFlags.showTagKeys, "tag-keys", false, "Show raw tag key data")
cmd.Flags().BoolVar(&dumpTSIFlags.showTagValues, "tag-values", false, "Show raw tag value data")
cmd.Flags().BoolVar(&dumpTSIFlags.showTagValueSeries, "tag-value-series", false, "Show raw series data for each value")
// Filter flags are captured as raw strings and compiled in dumpTsi.
cmd.Flags().StringVar(&measurementFilter, "measurement-filter", "", "Regex measurement filter")
cmd.Flags().StringVar(&tagKeyFilter, "tag-key-filter", "", "Regex tag key filter")
cmd.Flags().StringVar(&tagValueFilter, "tag-value-filter", "", "Regex tag value filter")
return cmd
}
// dumpTsi compiles the regex filter flags, applies the flag-implication rules
// (tag-value-series implies tag-values, which implies tag-keys, which implies
// measurements), and then runs the low-level TSI dump.
func dumpTsi(cmd *cobra.Command, args []string) error {
	logger := zap.NewNop()

	// Parse filters. An invalid pattern aborts before any files are opened.
	if measurementFilter != "" {
		re, err := regexp.Compile(measurementFilter)
		if err != nil {
			return err
		}
		dumpTSIFlags.measurementFilter = re
	}
	if tagKeyFilter != "" {
		re, err := regexp.Compile(tagKeyFilter)
		if err != nil {
			return err
		}
		dumpTSIFlags.tagKeyFilter = re
	}
	if tagValueFilter != "" {
		re, err := regexp.Compile(tagValueFilter)
		if err != nil {
			return err
		}
		dumpTSIFlags.tagValueFilter = re
	}

	if dumpTSIFlags.dataPath == "" {
		return errors.New("data path must be specified")
	}

	// Some flags imply other flags.
	if dumpTSIFlags.showTagValueSeries {
		dumpTSIFlags.showTagValues = true
	}
	if dumpTSIFlags.showTagValues {
		dumpTSIFlags.showTagKeys = true
	}
	if dumpTSIFlags.showTagKeys {
		dumpTSIFlags.showMeasurements = true
	}

	dump := tsi1.NewDumpTSI(logger)
	dump.SeriesFilePath = dumpTSIFlags.seriesFilePath
	dump.DataPath = dumpTSIFlags.dataPath
	dump.ShowSeries = dumpTSIFlags.showSeries
	dump.ShowMeasurements = dumpTSIFlags.showMeasurements
	dump.ShowTagKeys = dumpTSIFlags.showTagKeys
	// BUG FIX: ShowTagValues was never copied onto the dump, so the
	// --tag-values flag (and the implication from --tag-value-series)
	// previously had no effect on the output.
	dump.ShowTagValues = dumpTSIFlags.showTagValues
	dump.ShowTagValueSeries = dumpTSIFlags.showTagValueSeries
	dump.MeasurementFilter = dumpTSIFlags.measurementFilter
	dump.TagKeyFilter = dumpTSIFlags.tagKeyFilter
	dump.TagValueFilter = dumpTSIFlags.tagValueFilter

	return dump.Run()
}

View File

@ -1,57 +0,0 @@
package inspect
import (
"os"
"github.com/influxdata/influxdb/v2/kit/errors"
"github.com/influxdata/influxdb/v2/storage/wal"
"github.com/spf13/cobra"
)
// dumpWALFlags holds the parsed options for "influxd inspect dumpwal".
var dumpWALFlags = struct {
findDuplicates bool // report out-of-order keys instead of dumping entries
}{}
// NewDumpWALCommand returns the "dumpwal" sub-command, which parses and
// prints the contents of WAL files matched by the given filepath globs.
func NewDumpWALCommand() *cobra.Command {
dumpTSMWALCommand := &cobra.Command{
Use: "dumpwal",
Short: "Dump TSM data from WAL files",
Long: `
This tool dumps data from WAL files for debugging purposes. Given a list of filepath globs
(patterns which match to .wal file paths), the tool will parse and print out the entries in each file.
It has two modes of operation, depending on the --find-duplicates flag.
--find-duplicates=false (default): for each file, the following is printed:
* The file name
* for each entry,
* The type of the entry (either [write] or [delete-bucket-range]);
* The formatted entry contents
--find-duplicates=true: for each file, the following is printed:
* The file name
* A list of keys in the file that have out of order timestamps
`,
RunE: inspectDumpWAL,
}
dumpTSMWALCommand.Flags().BoolVarP(
&dumpWALFlags.findDuplicates,
"find-duplicates", "", false, "ignore dumping entries; only report keys in the WAL that are out of order")
return dumpTSMWALCommand
}
// inspectDumpWAL implements the dumpwal command: it expands the provided WAL
// file globs and either dumps their entries or reports out-of-order keys,
// depending on the --find-duplicates flag.
func inspectDumpWAL(cmd *cobra.Command, args []string) error {
	// Refuse to run with nothing to inspect.
	if len(args) == 0 {
		return errors.New("no files provided. aborting")
	}

	dumper := &wal.Dump{
		Stdout:         os.Stdout,
		Stderr:         os.Stderr,
		FileGlobs:      args,
		FindDuplicates: dumpWALFlags.findDuplicates,
	}

	_, err := dumper.Run(true)
	return err
}

View File

@ -1,30 +0,0 @@
package inspect
import (
"os"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"github.com/spf13/cobra"
)
// NewExportBlocksCommand builds the "export-blocks" sub-command, which writes
// every block of the TSM files named on the command line to stdout in an
// alternate (SQL) representation for inspection and debugging.
func NewExportBlocksCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   `export-blocks`,
		Short: "Exports block data",
		Long: `
This command will export all blocks in one or more TSM1 files to
another format for easier inspection and debugging.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			exporter := tsm1.NewSQLBlockExporter(os.Stdout)
			for _, path := range args {
				if err := exporter.ExportFile(path); err != nil {
					return err
				}
			}
			return exporter.Close()
		},
	}
	return cmd
}

View File

@ -2,13 +2,10 @@ package inspect
import (
"bufio"
"context"
"os"
"path/filepath"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"github.com/influxdata/influxdb/v2/tsdb/tsi1"
"github.com/influxdata/influxdb/v2/tsdb"
"github.com/influxdata/influxdb/v2/tsdb/index/tsi1"
"github.com/spf13/cobra"
)
@ -21,26 +18,23 @@ This command will export all series in a TSI index to
SQL format for easier inspection and debugging.`,
}
defaultDataDir, _ := fs.InfluxDir()
defaultDataDir = filepath.Join(defaultDataDir, "engine")
defaultIndexDir := filepath.Join(defaultDataDir, "index")
defaultSeriesDir := filepath.Join(defaultDataDir, "_series")
var seriesFilePath, dataPath string
cmd.Flags().StringVar(&seriesFilePath, "series-path", defaultSeriesDir, "Path to series file")
cmd.Flags().StringVar(&dataPath, "index-path", defaultIndexDir, "Path to the index directory of the data engine")
cmd.Flags().StringVar(&seriesFilePath, "series-path", "", "Path to series file")
cmd.Flags().StringVar(&dataPath, "index-path", "", "Path to the index directory of the data engine")
_ = cmd.MarkFlagRequired("series-path")
_ = cmd.MarkFlagRequired("index-path")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
// Initialize series file.
sfile := seriesfile.NewSeriesFile(seriesFilePath)
if err := sfile.Open(context.Background()); err != nil {
sfile := tsdb.NewSeriesFile(seriesFilePath)
if err := sfile.Open(); err != nil {
return err
}
defer sfile.Close()
// Open index.
idx := tsi1.NewIndex(sfile, tsi1.NewConfig(), tsi1.WithPath(dataPath), tsi1.DisableCompactions())
if err := idx.Open(context.Background()); err != nil {
idx := tsi1.NewIndex(sfile, "", tsi1.WithPath(dataPath), tsi1.DisableCompactions())
if err := idx.Open(); err != nil {
return err
}
defer idx.Close()

View File

@ -14,17 +14,17 @@ func NewCommand() *cobra.Command {
// List of available sub-commands
// If a new sub-command is created, it must be added here
subCommands := []*cobra.Command{
NewBuildTSICommand(),
NewCompactSeriesFileCommand(),
NewExportBlocksCommand(),
//NewBuildTSICommand(),
//NewCompactSeriesFileCommand(),
//NewExportBlocksCommand(),
NewExportIndexCommand(),
NewReportTSMCommand(),
NewVerifyTSMCommand(),
NewVerifyWALCommand(),
NewReportTSICommand(),
NewVerifySeriesFileCommand(),
NewDumpWALCommand(),
NewDumpTSICommand(),
//NewReportTSMCommand(),
//NewVerifyTSMCommand(),
//NewVerifyWALCommand(),
//NewReportTSICommand(),
//NewVerifySeriesFileCommand(),
//NewDumpWALCommand(),
//NewDumpTSICommand(),
}
base.AddCommand(subCommands...)

View File

@ -1,99 +0,0 @@
package inspect
import (
"errors"
"io"
"os"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/tsdb/tsi1"
"github.com/spf13/cobra"
)
// reportTSIFlags holds the parsed options for "influxd inspect report-tsi".
var reportTSIFlags = struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
// Data path options
Path string // optional. Defaults to dbPath/engine/index
SeriesFilePath string // optional. Defaults to dbPath/_series
// Tenant filtering options
Org string
Bucket string
// Reporting options
TopN int
ByMeasurement bool
byTagKey bool // currently unused
}{}
// NewReportTSICommand returns a new instance of Command with default setting applied.
func NewReportTSICommand() *cobra.Command {
cmd := &cobra.Command{
Use: "report-tsi",
Short: "Reports the cardinality of TSI files",
Long: `This command will analyze TSI files within a storage engine directory, reporting
the cardinality of data within the files, divided into org and bucket cardinalities.
For each report, the following is output:
* All orgs and buckets in the index;
* The series cardinality within each org and each bucket;
* The time taken to read the index.
Depending on the --measurements flag, series cardinality is segmented
in the following ways:
* Series cardinality for each organization;
* Series cardinality for each bucket;
* Series cardinality for each measurement;`,
RunE: RunReportTSI,
}
// NOTE(review): defaults are built by string concatenation on $HOME rather
// than filepath.Join, unlike the sibling commands — confirm intentional.
cmd.Flags().StringVar(&reportTSIFlags.Path, "path", os.Getenv("HOME")+"/.influxdbv2/engine/index", "Path to index. Defaults $HOME/.influxdbv2/engine/index")
cmd.Flags().StringVar(&reportTSIFlags.SeriesFilePath, "series-file", os.Getenv("HOME")+"/.influxdbv2/engine/_series", "Optional path to series file. Defaults $HOME/.influxdbv2/engine/_series")
cmd.Flags().BoolVarP(&reportTSIFlags.ByMeasurement, "measurements", "m", false, "Segment cardinality by measurements")
cmd.Flags().IntVarP(&reportTSIFlags.TopN, "top", "t", 0, "Limit results to top n")
cmd.Flags().StringVarP(&reportTSIFlags.Bucket, "bucket_id", "b", "", "If bucket is specified, org must be specified. A bucket id must be a base-16 string")
cmd.Flags().StringVarP(&reportTSIFlags.Org, "org_id", "o", "", "Only specified org data will be reported. An org id must be a base-16 string")
cmd.SetOutput(reportTSIFlags.Stdout)
return cmd
}
// RunReportTSI executes the run command for ReportTSI.
func RunReportTSI(cmd *cobra.Command, args []string) error {
	report := tsi1.NewReportCommand()
	report.DataPath = reportTSIFlags.Path
	report.ByMeasurement = reportTSIFlags.ByMeasurement
	report.TopN = reportTSIFlags.TopN
	report.SeriesDirPath = reportTSIFlags.SeriesFilePath
	report.Stdout = os.Stdout
	report.Stderr = os.Stderr

	// Parse the optional org filter.
	if reportTSIFlags.Org != "" {
		orgID, err := influxdb.IDFromString(reportTSIFlags.Org)
		if err != nil {
			return err
		}
		report.OrgID = orgID
	}

	// Parse the optional bucket filter; a bucket without an org is invalid.
	if reportTSIFlags.Bucket != "" {
		bucketID, err := influxdb.IDFromString(reportTSIFlags.Bucket)
		if err != nil {
			return err
		}
		if report.OrgID == nil {
			return errors.New("org must be provided if filtering by bucket")
		}
		report.BucketID = bucketID
	}

	// Run command with printing enabled
	_, err := report.Run(true)
	return err
}

View File

@ -1,109 +0,0 @@
package inspect
import (
"fmt"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/kit/errors"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"github.com/spf13/cobra"
"os"
"path/filepath"
)
// reportTSMFlags defines the `report-tsm` Command.
var reportTSMFlags = struct {
pattern string // only process TSM files whose name contains this pattern
exact bool // compute exact cardinality instead of HLL++ estimate
detailed bool // segment cardinality by measurement/tag key/field
orgID, bucketID string // optional tenant filters (base-16 IDs)
dataDir string // storage engine data directory to scan
}{}
// NewReportTSMCommand builds the "report-tsm" sub-command, which analyzes the
// TSM files under --data-dir and reports cardinality and time-range details.
func NewReportTSMCommand() *cobra.Command {
	reportTSMCommand := &cobra.Command{
		Use:   "report-tsm",
		Short: "Run TSM report",
		Long: `
This command will analyze TSM files within a storage engine directory, reporting
the cardinality within the files as well as the time range that the point data
covers.
This command only interrogates the index within each file, and does not read any
block data. To reduce heap requirements, by default report-tsm estimates the
overall cardinality in the file set by using the HLL++ algorithm. Exact
cardinalities can be determined by using the --exact flag.
For each file, the following is output:
* The full filename;
* The series cardinality within the file;
* The number of series first encountered within the file;
* The min and max timestamp associated with TSM data in the file; and
* The time taken to load the TSM index and apply any tombstones.
The summary section then outputs the total time range and series cardinality for
the fileset. Depending on the --detailed flag, series cardinality is segmented
in the following ways:
* Series cardinality for each organization;
* Series cardinality for each bucket;
* Series cardinality for each measurement;
* Number of field keys for each measurement; and
* Number of tag values for each tag key.`,
		RunE: inspectReportTSMF,
	}

	reportTSMCommand.Flags().StringVarP(&reportTSMFlags.pattern, "pattern", "", "", "only process TSM files containing pattern")
	// BUG FIX: help text previously read "calculate and exact cardinality".
	reportTSMCommand.Flags().BoolVarP(&reportTSMFlags.exact, "exact", "", false, "calculate an exact cardinality count. Warning, may use significant memory...")
	reportTSMCommand.Flags().BoolVarP(&reportTSMFlags.detailed, "detailed", "", false, "emit series cardinality segmented by measurements, tag keys and fields. Warning, may take a while.")
	reportTSMCommand.Flags().StringVarP(&reportTSMFlags.orgID, "org-id", "", "", "process only data belonging to organization ID.")
	reportTSMCommand.Flags().StringVarP(&reportTSMFlags.bucketID, "bucket-id", "", "", "process only data belonging to bucket ID. Requires org flag to be set.")

	// The default data directory is derived from the influx home directory;
	// failure here means the environment is unusable, so panic (runs at
	// command construction, before any flags are parsed).
	dir, err := fs.InfluxDir()
	if err != nil {
		panic(err)
	}
	dir = filepath.Join(dir, "engine/data")
	reportTSMCommand.Flags().StringVarP(&reportTSMFlags.dataDir, "data-dir", "", dir, fmt.Sprintf("use provided data directory (defaults to %s).", dir))

	return reportTSMCommand
}
// inspectReportTSMF runs the report-tsm tool.
func inspectReportTSMF(cmd *cobra.Command, args []string) error {
	// A bucket filter without an org filter is invalid.
	if reportTSMFlags.orgID == "" && reportTSMFlags.bucketID != "" {
		return errors.New("org-id must be set for non-empty bucket-id")
	}

	report := &tsm1.Report{
		Stderr:   os.Stderr,
		Stdout:   os.Stdout,
		Dir:      reportTSMFlags.dataDir,
		Pattern:  reportTSMFlags.pattern,
		Detailed: reportTSMFlags.detailed,
		Exact:    reportTSMFlags.exact,
	}

	// Parse the optional tenant filters into binary IDs.
	if raw := reportTSMFlags.orgID; raw != "" {
		id, err := influxdb.IDFromString(raw)
		if err != nil {
			return err
		}
		report.OrgID = id
	}
	if raw := reportTSMFlags.bucketID; raw != "" {
		id, err := influxdb.IDFromString(raw)
		if err != nil {
			return err
		}
		report.BucketID = id
	}

	_, err := report.Run(true)
	return err
}

View File

@ -1,69 +0,0 @@
package inspect
import (
"os"
"runtime"
"strconv"
"github.com/influxdata/influxdb/v2/logger"
"github.com/influxdata/influxdb/v2/tsdb/seriesfile"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
// NewVerifySeriesFileCommand returns a new instance of verifySeriesCommand
// for execution of "influx_inspect verify-seriesfile".
func NewVerifySeriesFileCommand() *cobra.Command {
	verifySeriesCommand := &cobra.Command{
		Use:   "verify-seriesfile",
		Short: "Verifies the integrity of Series files",
		// BUG FIX: the worker-count default used string(runtime.GOMAXPROCS(0)),
		// which converts the int to a single rune (e.g. "\b" for 8) instead of
		// its decimal representation; strconv.Itoa prints it correctly.
		Long: `Verifies the integrity of Series files.
Usage: influx_inspect verify-seriesfile [flags]
--series-file <path>
Path to a series file. This defaults to ` + os.Getenv("HOME") + `/.influxdbv2/engine/_series.
--v
Enable verbose logging.
--c
How many concurrent workers to run.
Defaults to "` + strconv.Itoa(runtime.GOMAXPROCS(0)) + `" on this machine.`,
		RunE: verifySeriesRun,
	}

	verifySeriesCommand.Flags().StringVar(&VerifySeriesFlags.seriesFile, "series-file", os.Getenv("HOME")+"/.influxdbv2/engine/_series",
		"Path to a series file. This defaults to "+os.Getenv("HOME")+"/.influxdbv2/engine/_series")
	verifySeriesCommand.Flags().BoolVarP(&VerifySeriesFlags.verbose, "v", "v", false,
		"Verbose output.")
	verifySeriesCommand.Flags().IntVarP(&VerifySeriesFlags.concurrent, "c", "c", runtime.GOMAXPROCS(0),
		"How many concurrent workers to run.")

	return verifySeriesCommand
}
// VerifySeriesFlags holds the parsed options for "influxd inspect verify-seriesfile".
var VerifySeriesFlags = struct {
seriesFile string // path to the series file directory to verify
verbose bool // log at Info instead of Warn
concurrent int // number of concurrent verification workers
}{}
// verifySeriesRun executes the verify-seriesfile command, checking the
// integrity of the configured series file with the configured concurrency.
func verifySeriesRun(cmd *cobra.Command, args []string) error {
	// Verbose mode lowers the log threshold from Warn to Info.
	cfg := logger.NewConfig()
	if VerifySeriesFlags.verbose {
		cfg.Level = zapcore.InfoLevel
	} else {
		cfg.Level = zapcore.WarnLevel
	}

	lg, err := cfg.New(os.Stderr)
	if err != nil {
		return err
	}

	verifier := seriesfile.NewVerify()
	verifier.Logger = lg
	verifier.Concurrent = VerifySeriesFlags.concurrent

	// Nothing to do when no series file path is configured.
	if VerifySeriesFlags.seriesFile == "" {
		return nil
	}
	_, err = verifier.VerifySeriesFile(VerifySeriesFlags.seriesFile)
	return err
}

View File

@ -1,72 +0,0 @@
package inspect
import (
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb/v2/kit/cli"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"github.com/spf13/cobra"
)
// verifyTSMFlags defines the `verify-tsm` Command.
var verifyTSMFlags = struct {
cli.OrgBucket
path string // NOTE(review): appears unused; verifyTSMF reads paths from args
}{}
// NewVerifyTSMCommand returns the "verify-tsm" sub-command, which checks TSM
// files for inconsistencies between the index and the block data.
func NewVerifyTSMCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "verify-tsm <pathspec>...",
Short: "Checks the consistency of TSM files",
Long: `
This command will analyze a set of TSM files for inconsistencies between the
TSM index and the blocks.
The checks performed by this command are:
* CRC-32 checksums match for each block
* TSM index min and max timestamps match decoded data
OPTIONS
<pathspec>...
A list of files or directories to search for TSM files.
An optional organization or organization and bucket may be specified to limit
the analysis.
`,
RunE: verifyTSMF,
}
// Registers the embedded org/bucket filter flags.
verifyTSMFlags.AddFlags(cmd)
return cmd
}
// verifyTSMF runs the verify-tsm tool: it expands each pathspec argument into
// a list of TSM files and checks them for index/block inconsistencies.
func verifyTSMF(cmd *cobra.Command, args []string) error {
	verify := tsm1.VerifyTSM{
		Stdout:   os.Stdout,
		OrgID:    verifyTSMFlags.Org,
		BucketID: verifyTSMFlags.Bucket,
	}

	// Resolve all pathspecs: a directory contributes every *.tsm file it
	// contains; a plain file is used as-is. Unstatable paths are reported
	// and skipped rather than aborting the whole run.
	for _, arg := range args {
		fi, err := os.Stat(arg)
		if err != nil {
			// BUG FIX: the message previously lacked a trailing newline, so
			// consecutive errors ran together on one line.
			fmt.Printf("Error processing path %q: %v\n", arg, err)
			continue
		}

		if fi.IsDir() {
			// Glob only fails on a malformed pattern; ours is fixed, so the
			// error is deliberately ignored.
			files, _ := filepath.Glob(filepath.Join(arg, "*."+tsm1.TSMFileExtension))
			verify.Paths = append(verify.Paths, files...)
		} else {
			verify.Paths = append(verify.Paths, arg)
		}
	}

	return verify.Run()
}

View File

@ -1,57 +0,0 @@
package inspect
import (
"fmt"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/storage/wal"
"github.com/spf13/cobra"
"os"
"path/filepath"
)
// NewVerifyWALCommand returns the "verify-wal" sub-command, which scans WAL
// files under --data-dir for corruption.
func NewVerifyWALCommand() *cobra.Command {
verifyWALCommand := &cobra.Command{
Use: `verify-wal`,
Short: "Check for WAL corruption",
Long: `
This command will analyze the WAL (Write-Ahead Log) in a storage directory to
check if there are any corrupt files. If any corrupt files are found, the names
of said corrupt files will be reported. The tool will also count the total number
of entries in the scanned WAL files, in case this is of interest.
For each file, the following is output:
* The file name;
* "clean" (if the file is clean) OR
The first position of any corruption that is found
In the summary section, the following is printed:
* The number of WAL files scanned;
* The number of WAL entries scanned;
* A list of files found to be corrupt`,
RunE: inspectVerifyWAL,
}
// Failure to resolve the influx home directory makes the environment
// unusable, so panic at command-construction time.
dir, err := fs.InfluxDir()
if err != nil {
panic(err)
}
dir = filepath.Join(dir, "engine/wal")
verifyWALCommand.Flags().StringVarP(&verifyWALFlags.dataDir, "data-dir", "", dir, fmt.Sprintf("use provided data directory (defaults to %s).", dir))
return verifyWALCommand
}
// verifyWALFlags holds the parsed options for "influxd inspect verify-wal".
var verifyWALFlags = struct {
dataDir string // WAL directory to scan
}{}
// inspectVerifyWAL runs the verify-wal tool.
func inspectVerifyWAL(cmd *cobra.Command, args []string) error {
report := &wal.Verifier{
Stderr: os.Stderr,
Stdout: os.Stdout,
Dir: verifyWALFlags.dataDir,
}
// Run with printing enabled; only the error result is propagated.
_, err := report.Run(true)
return err
}

View File

@ -6,15 +6,13 @@ import (
"io/ioutil"
"os"
"sync"
"time"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/http"
"github.com/influxdata/influxdb/v2/kit/prom"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/storage/reads"
"github.com/influxdata/influxdb/v2/tsdb/cursors"
"github.com/influxdata/influxql"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
@ -25,13 +23,15 @@ var _ Engine = (*storage.Engine)(nil)
// to facilitate testing.
type Engine interface {
influxdb.DeleteService
reads.Viewer
storage.PointsWriter
storage.BucketDeleter
storage.EngineSchema
prom.PrometheusCollector
influxdb.BackupService
SeriesCardinality() int64
SeriesCardinality(orgID, bucketID influxdb.ID) int64
TSDBStore() storage.TSDBStore
MetaClient() storage.MetaClient
WithLogger(log *zap.Logger)
Open(context.Context) error
@ -51,7 +51,8 @@ type TemporaryEngine struct {
mu sync.Mutex
opened bool
engine *storage.Engine
engine *storage.Engine
tsdbStore temporaryTSDBStore
log *zap.Logger
}
@ -89,6 +90,8 @@ func (t *TemporaryEngine) Open(ctx context.Context) error {
return err
}
t.tsdbStore.TSDBStore = t.engine.TSDBStore()
t.opened = true
return nil
}
@ -105,21 +108,29 @@ func (t *TemporaryEngine) Close() error {
}
// WritePoints stores points into the storage engine.
func (t *TemporaryEngine) WritePoints(ctx context.Context, points []models.Point) error {
return t.engine.WritePoints(ctx, points)
func (t *TemporaryEngine) WritePoints(ctx context.Context, orgID influxdb.ID, bucketID influxdb.ID, points []models.Point) error {
return t.engine.WritePoints(ctx, orgID, bucketID, points)
}
// SeriesCardinality returns the number of series in the engine.
func (t *TemporaryEngine) SeriesCardinality() int64 {
return t.engine.SeriesCardinality()
func (t *TemporaryEngine) SeriesCardinality(orgID, bucketID influxdb.ID) int64 {
return t.engine.SeriesCardinality(orgID, bucketID)
}
// DeleteBucketRangePredicate will delete a bucket from the range and predicate.
func (t *TemporaryEngine) DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID influxdb.ID, min, max int64, pred influxdb.Predicate, opts influxdb.DeletePrefixRangeOptions) error {
return t.engine.DeleteBucketRangePredicate(ctx, orgID, bucketID, min, max, pred, opts)
func (t *TemporaryEngine) DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID influxdb.ID, min, max int64, pred influxdb.Predicate) error {
return t.engine.DeleteBucketRangePredicate(ctx, orgID, bucketID, min, max, pred)
}
func (t *TemporaryEngine) CreateBucket(ctx context.Context, b *influxdb.Bucket) error {
return t.engine.CreateBucket(ctx, b)
}
func (t *TemporaryEngine) UpdateBucketRetentionPeriod(ctx context.Context, bucketID influxdb.ID, d time.Duration) error {
return t.engine.UpdateBucketRetentionPeriod(ctx, bucketID, d)
}
// DeleteBucket deletes a bucket from the time-series data.
func (t *TemporaryEngine) DeleteBucket(ctx context.Context, orgID, bucketID influxdb.ID) error {
return t.engine.DeleteBucket(ctx, orgID, bucketID)
@ -136,26 +147,6 @@ func (t *TemporaryEngine) PrometheusCollectors() []prometheus.Collector {
return t.engine.PrometheusCollectors()
}
// CreateCursorIterator calls into the underlying engines CreateCurorIterator.
func (t *TemporaryEngine) CreateCursorIterator(ctx context.Context) (cursors.CursorIterator, error) {
return t.engine.CreateCursorIterator(ctx)
}
// CreateSeriesCursor calls into the underlying engines CreateSeriesCursor.
func (t *TemporaryEngine) CreateSeriesCursor(ctx context.Context, orgID, bucketID influxdb.ID, cond influxql.Expr) (storage.SeriesCursor, error) {
return t.engine.CreateSeriesCursor(ctx, orgID, bucketID, cond)
}
// TagKeys calls into the underlying engines TagKeys.
func (t *TemporaryEngine) TagKeys(ctx context.Context, orgID, bucketID influxdb.ID, start, end int64, predicate influxql.Expr) (cursors.StringIterator, error) {
return t.engine.TagKeys(ctx, orgID, bucketID, start, end, predicate)
}
// TagValues calls into the underlying engine's TagValues for the given tag key,
// org/bucket, and time range, restricted by the optional predicate.
func (t *TemporaryEngine) TagValues(ctx context.Context, orgID, bucketID influxdb.ID, tagKey string, start, end int64, predicate influxql.Expr) (cursors.StringIterator, error) {
	return t.engine.TagValues(ctx, orgID, bucketID, tagKey, start, end, predicate)
}
// Flush will remove the time-series files and re-open the engine.
func (t *TemporaryEngine) Flush(ctx context.Context) {
if err := t.Close(); err != nil {
@ -178,3 +169,15 @@ func (t *TemporaryEngine) FetchBackupFile(ctx context.Context, backupID int, bac
// InternalBackupPath delegates to the wrapped engine to compute the on-disk
// path for the given backup ID.
func (t *TemporaryEngine) InternalBackupPath(backupID int) string {
	return t.engine.InternalBackupPath(backupID)
}
// TSDBStore returns a pointer to the engine's embedded temporaryTSDBStore,
// satisfying the storage.TSDBStore accessor.
func (t *TemporaryEngine) TSDBStore() storage.TSDBStore {
	return &t.tsdbStore
}
// MetaClient delegates to the wrapped engine's meta client.
func (t *TemporaryEngine) MetaClient() storage.MetaClient {
	return t.engine.MetaClient()
}
// temporaryTSDBStore embeds storage.TSDBStore so TemporaryEngine can hand out
// a TSDBStore value; all behavior comes from the embedded store.
type temporaryTSDBStore struct {
	storage.TSDBStore
}

View File

@ -27,6 +27,8 @@ import (
"github.com/influxdata/influxdb/v2/endpoints"
"github.com/influxdata/influxdb/v2/gather"
"github.com/influxdata/influxdb/v2/http"
iqlcontrol "github.com/influxdata/influxdb/v2/influxql/control"
iqlquery "github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/inmem"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/kit/cli"
@ -46,6 +48,7 @@ import (
"github.com/influxdata/influxdb/v2/pkger"
infprom "github.com/influxdata/influxdb/v2/prometheus"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/builtinlazy"
"github.com/influxdata/influxdb/v2/query/control"
"github.com/influxdata/influxdb/v2/query/fluxlang"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
@ -63,8 +66,11 @@ import (
"github.com/influxdata/influxdb/v2/task/backend/scheduler"
"github.com/influxdata/influxdb/v2/telemetry"
"github.com/influxdata/influxdb/v2/tenant"
_ "github.com/influxdata/influxdb/v2/tsdb/tsi1" // needed for tsi1
_ "github.com/influxdata/influxdb/v2/tsdb/tsm1" // needed for tsm1
_ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" // needed for tsm1
_ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" // needed for tsi1
iqlcoordinator "github.com/influxdata/influxdb/v2/v1/coordinator"
"github.com/influxdata/influxdb/v2/v1/services/meta"
storage2 "github.com/influxdata/influxdb/v2/v1/services/storage"
"github.com/influxdata/influxdb/v2/vault"
pzap "github.com/influxdata/influxdb/v2/zap"
"github.com/opentracing/opentracing-go"
@ -130,6 +136,8 @@ func cmdRunE(ctx context.Context, l *Launcher) func() error {
// exit with SIGINT and SIGTERM
ctx = signals.WithStandardSignals(ctx)
builtinlazy.Initialize()
if err := l.run(ctx); err != nil {
return err
} else if !l.Running() {
@ -214,6 +222,12 @@ func launcherOpts(l *Launcher) []cli.Opt {
Default: false,
Desc: "add /debug/flush endpoint to clear stores; used for end-to-end tests",
},
{
DestP: &l.testingAlwaysAllowSetup,
Flag: "testing-always-allow-setup",
Default: false,
Desc: "ensures the /api/v2/setup endpoint always returns true to allow onboarding",
},
{
DestP: &l.enginePath,
Flag: "engine-path",
@ -374,11 +388,12 @@ type Launcher struct {
cancel func()
running bool
storeType string
assetsPath string
testing bool
sessionLength int // in minutes
sessionRenewDisabled bool
storeType string
assetsPath string
testing bool
testingAlwaysAllowSetup bool
sessionLength int // in minutes
sessionRenewDisabled bool
logLevel string
tracingType string
@ -399,9 +414,10 @@ type Launcher struct {
maxMemoryBytes int
queueSize int
boltClient *bolt.Client
kvStore kv.SchemaStore
kvService *kv.Service
boltClient *bolt.Client
kvStore kv.SchemaStore
kvService *kv.Service
//TODO fix
engine Engine
StorageConfig storage.Config
@ -706,18 +722,34 @@ func (m *Launcher) run(ctx context.Context) (err error) {
if m.pageFaultRate > 0 {
pageFaultLimiter = rate.NewLimiter(rate.Limit(m.pageFaultRate), 1)
}
_ = pageFaultLimiter
metaClient := meta.NewClient(meta.NewConfig(), m.kvStore)
if err := metaClient.Open(); err != nil {
m.log.Error("Failed to open meta client", zap.Error(err))
return err
}
if m.testing {
// the testing engine will write/read into a temporary directory
engine := NewTemporaryEngine(m.StorageConfig, storage.WithRetentionEnforcer(ts.BucketService))
engine := NewTemporaryEngine(
m.StorageConfig,
storage.WithRetentionEnforcer(ts.BucketService),
storage.WithMetaClient(metaClient),
)
flushers = append(flushers, engine)
m.engine = engine
} else {
// check for 2.x data / state from a prior 2.x
if err := checkForPriorVersion(ctx, m.log, m.boltPath, m.enginePath, ts.BucketService, metaClient); err != nil {
os.Exit(1)
}
m.engine = storage.NewEngine(
m.enginePath,
m.StorageConfig,
storage.WithRetentionEnforcer(ts.BucketService),
storage.WithPageFaultLimiter(pageFaultLimiter),
storage.WithMetaClient(metaClient),
)
}
m.engine.WithLogger(m.log)
@ -735,7 +767,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
)
deps, err := influxdb.NewDependencies(
storageflux.NewReader(readservice.NewStore(m.engine)),
storageflux.NewReader(storage2.NewStore(m.engine.TSDBStore(), m.engine.MetaClient())),
m.engine,
authorizer.NewBucketService(ts.BucketService),
authorizer.NewOrgService(ts.OrganizationService),
@ -831,6 +863,25 @@ func (m *Launcher) run(ctx context.Context) (err error) {
dbrpSvc := dbrp.NewService(ctx, authorizer.NewBucketService(ts.BucketService), m.kvStore)
dbrpSvc = dbrp.NewAuthorizedService(dbrpSvc)
cm := iqlcontrol.NewControllerMetrics([]string{})
m.reg.MustRegister(cm.PrometheusCollectors()...)
mapper := &iqlcoordinator.LocalShardMapper{
MetaClient: metaClient,
TSDBStore: m.engine.TSDBStore(),
DBRP: dbrpSvc,
}
qe := iqlquery.NewExecutor(m.log, cm)
se := &iqlcoordinator.StatementExecutor{
MetaClient: metaClient,
TSDBStore: m.engine.TSDBStore(),
ShardMapper: mapper,
DBRP: dbrpSvc,
}
qe.StatementExecutor = se
qe.StatementNormalizer = se
var checkSvc platform.CheckService
{
coordinator := coordinator.NewCoordinator(m.log, m.scheduler, m.executor)
@ -963,6 +1014,16 @@ func (m *Launcher) run(ctx context.Context) (err error) {
ts.BucketService = storage.NewBucketService(ts.BucketService, m.engine)
ts.BucketService = dbrp.NewBucketService(m.log, ts.BucketService, dbrpSvc)
var onboardOpts []tenant.OnboardServiceOptionFn
if m.testingAlwaysAllowSetup {
onboardOpts = append(onboardOpts, tenant.WithAlwaysAllowInitialUser())
}
onboardSvc := tenant.NewOnboardService(ts, authSvc, onboardOpts...) // basic service
onboardSvc = tenant.NewAuthedOnboardSvc(onboardSvc) // with auth
onboardSvc = tenant.NewOnboardingMetrics(m.reg, onboardSvc, metric.WithSuffix("new")) // with metrics
onboardSvc = tenant.NewOnboardingLogger(m.log.With(zap.String("handler", "onboard")), onboardSvc) // with logging
m.apibackend = &http.APIBackend{
AssetsPath: m.assetsPath,
HTTPErrorHandler: kithttp.ErrorHandler(0),
@ -984,6 +1045,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
BucketService: ts.BucketService,
SessionService: sessionSvc,
UserService: ts.UserService,
OnboardingService: onboardSvc,
DBRPService: dbrpSvc,
OrganizationService: ts.OrganizationService,
UserResourceMappingService: ts.UserResourceMappingService,
@ -997,6 +1059,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
VariableService: variableSvc,
PasswordsService: ts.PasswordsService,
InfluxQLService: storageQueryService,
InfluxqldService: iqlquery.NewProxyExecutor(m.log, qe),
FluxService: storageQueryService,
FluxLanguageService: fluxlang.DefaultService,
TaskService: taskSvc,
@ -1060,16 +1123,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
}
userHTTPServer := ts.NewUserHTTPHandler(m.log)
var onboardHTTPServer *tenant.OnboardHandler
{
onboardSvc := tenant.NewOnboardService(ts, authSvc) // basic service
onboardSvc = tenant.NewAuthedOnboardSvc(onboardSvc) // with auth
onboardSvc = tenant.NewOnboardingMetrics(m.reg, onboardSvc, metric.WithSuffix("new")) // with metrics
onboardSvc = tenant.NewOnboardingLogger(m.log.With(zap.String("handler", "onboard")), onboardSvc) // with logging
onboardHTTPServer = tenant.NewHTTPOnboardHandler(m.log, onboardSvc)
}
onboardHTTPServer := tenant.NewHTTPOnboardHandler(m.log, onboardSvc)
// feature flagging for new labels service
var oldLabelHandler nethttp.Handler
@ -1233,6 +1287,53 @@ func (m *Launcher) run(ctx context.Context) (err error) {
return nil
}
// checkForPriorVersion inspects the bolt metadata and the engine directory for
// state left behind by an earlier, incompatible InfluxDB 2.x layout. If any
// existing bucket lacks a meta database entry, or legacy "_series"/"index"
// directories exist under enginePath, it logs the findings and returns an
// error so startup can abort.
func checkForPriorVersion(ctx context.Context, log *zap.Logger, boltPath string, enginePath string, bs platform.BucketService, metaClient *meta.Client) error {
	buckets, _, err := bs.FindBuckets(ctx, platform.BucketFilter{})
	if err != nil {
		log.Error("Failed to retrieve buckets", zap.Error(err))
		return err
	}

	// Pass 1: every existing bucket must have a matching meta database entry.
	// With no buckets at all there is nothing to validate.
	missingMeta := false
	if len(buckets) > 0 {
		log.Info("Checking InfluxDB metadata for prior version.", zap.String("bolt_path", boltPath))
		for _, bucket := range buckets {
			if metaClient.Database(bucket.ID.String()) == nil {
				log.Error("Missing metadata for bucket.", zap.String("bucket", bucket.Name), zap.Stringer("bucket_id", bucket.ID))
				missingMeta = true
			}
		}
		if missingMeta {
			log.Error("Incompatible InfluxDB 2.0 metadata found. File must be moved before influxd will start.", zap.String("path", boltPath))
		}
	}

	// Pass 2: look for directories matching the old on-disk structure.
	legacyDirs := false
	for _, name := range []string{"_series", "index"} {
		dir := filepath.Join(enginePath, name)
		fi, statErr := os.Stat(dir)
		if statErr != nil || !fi.IsDir() {
			continue
		}
		log.Error("Found directory that is incompatible with this version of InfluxDB.", zap.String("path", dir))
		legacyDirs = true
	}

	if !missingMeta && !legacyDirs {
		return nil
	}
	log.Error("Incompatible InfluxDB 2.0 version found. Move all files outside of engine_path before influxd will start.", zap.String("engine_path", enginePath))
	return errors.New("incompatible InfluxDB version")
}
// isAddressPortAvailable checks whether the address:port is available to listen,
// by using net.Listen to verify that the port opens successfully, then closes the listener.
func isAddressPortAvailable(address string, port int) (bool, error) {

View File

@ -85,6 +85,9 @@ func RunTestLauncherOrFail(tb testing.TB, ctx context.Context, flagger feature.F
// Passed arguments will overwrite/add to the default ones.
func (tl *TestLauncher) Run(ctx context.Context, args ...string) error {
largs := make([]string, 0, len(args)+8)
largs = append(largs, "--store", "memory")
largs = append(largs, "--e2e-testing")
largs = append(largs, "--testing-always-allow-setup")
largs = append(largs, "--bolt-path", filepath.Join(tl.Path, bolt.DefaultFilename))
largs = append(largs, "--engine-path", filepath.Join(tl.Path, "engine"))
largs = append(largs, "--http-bind-address", "127.0.0.1:0")
@ -137,15 +140,7 @@ func (tl *TestLauncher) SetupOrFail(tb testing.TB) {
// OnBoard attempts an on-boarding request.
// The on-boarding status is also reset to allow multiple user/org/buckets to be created.
func (tl *TestLauncher) OnBoard(req *platform.OnboardingRequest) (*platform.OnboardingResults, error) {
res, err := tl.KeyValueService().OnboardInitialUser(context.Background(), req)
if err != nil {
return nil, err
}
err = tl.KeyValueService().PutOnboardingStatus(context.Background(), false)
if err != nil {
return nil, err
}
return res, nil
return tl.apibackend.OnboardingService.OnboardInitialUser(context.Background(), req)
}
// OnBoardOrFail attempts an on-boarding request or fails on error.

View File

@ -19,10 +19,7 @@ import (
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
"github.com/influxdata/flux/execute/table"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/values"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
@ -30,6 +27,7 @@ import (
"github.com/influxdata/influxdb/v2/kit/feature"
"github.com/influxdata/influxdb/v2/kit/prom"
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/pkg/flux/execute/table"
"github.com/influxdata/influxdb/v2/query"
)
@ -223,7 +221,7 @@ func queryPoints(ctx context.Context, t *testing.T, l *launcher.TestLauncher, op
if d.verbose {
t.Logf("query:\n%s", qs)
}
pkg, err := runtime.ParseToJSON(qs)
pkg, err := flux.Parse(qs)
if err != nil {
t.Fatal(err)
}
@ -753,194 +751,8 @@ from(bucket: "%s")
}
}
// TestQueryProfiler is a stub flux profiler whose name is parameterized by
// start, so several distinct instances can be registered side by side.
type TestQueryProfiler struct {
	start int64
}

// Name returns this profiler's unique name, e.g. "query0" when start is 0.
func (s TestQueryProfiler) Name() string {
	name := fmt.Sprintf("query%d", s.start)
	return name
}
// GetResult builds a single synthetic profiler table whose measurement is
// "profiler/query<start>". Every integer column is derived from s.start plus a
// fixed offset, so the expected output for a given start is fully
// deterministic and easy to assert against in tests.
func (s TestQueryProfiler) GetResult(q flux.Query, alloc *memory.Allocator) (flux.Table, error) {
	// Group solely on _measurement so each profiler instance yields one table.
	groupKey := execute.NewGroupKey(
		[]flux.ColMeta{
			{
				Label: "_measurement",
				Type:  flux.TString,
			},
		},
		[]values.Value{
			values.NewString(fmt.Sprintf("profiler/query%d", s.start)),
		},
	)
	b := execute.NewColListTableBuilder(groupKey, alloc)
	// Column schema for the fake profiler output; column order here must match
	// the colData order below, since cells are appended positionally.
	colMeta := []flux.ColMeta{
		{
			Label: "_measurement",
			Type:  flux.TString,
		},
		{
			Label: "TotalDuration",
			Type:  flux.TInt,
		},
		{
			Label: "CompileDuration",
			Type:  flux.TInt,
		},
		{
			Label: "QueueDuration",
			Type:  flux.TInt,
		},
		{
			Label: "PlanDuration",
			Type:  flux.TInt,
		},
		{
			Label: "RequeueDuration",
			Type:  flux.TInt,
		},
		{
			Label: "ExecuteDuration",
			Type:  flux.TInt,
		},
		{
			Label: "Concurrency",
			Type:  flux.TInt,
		},
		{
			Label: "MaxAllocated",
			Type:  flux.TInt,
		},
		{
			Label: "TotalAllocated",
			Type:  flux.TInt,
		},
		{
			Label: "RuntimeErrors",
			Type:  flux.TString,
		},
		{
			Label: "influxdb/scanned-bytes",
			Type:  flux.TInt,
		},
		{
			Label: "influxdb/scanned-values",
			Type:  flux.TInt,
		},
		{
			Label: "flux/query-plan",
			Type:  flux.TString,
		},
	}
	// The single row of cell values: ints are s.start+offset, strings are
	// fixed fixtures ("error1\nerror2" exercises embedded newlines in CSV).
	colData := []interface{}{
		fmt.Sprintf("profiler/query%d", s.start),
		s.start,
		s.start + 1,
		s.start + 2,
		s.start + 3,
		s.start + 4,
		s.start + 5,
		s.start + 6,
		s.start + 7,
		s.start + 8,
		"error1\nerror2",
		s.start + 9,
		s.start + 10,
		"query plan",
	}
	for _, col := range colMeta {
		if _, err := b.AddCol(col); err != nil {
			return nil, err
		}
	}
	// Append the row, dispatching on the dynamic type of each cell: int64
	// cells go through AppendInt, everything else is asserted to string.
	for i := 0; i < len(colData); i++ {
		if intValue, ok := colData[i].(int64); ok {
			b.AppendInt(i, intValue)
		} else {
			b.AppendString(i, colData[i].(string))
		}
	}
	tbl, err := b.Table()
	if err != nil {
		return nil, err
	}
	return tbl, nil
}
// TestFluxProfiler registers two stub profilers (start 0 and start 100), runs
// a flux query that enables them by name (including a duplicate and a
// non-existent name), and asserts that the result stream contains both the
// query result and one deterministic _profiler table per registered profiler.
func TestFluxProfiler(t *testing.T) {
	testcases := []struct {
		name  string
		data  []string
		query string
		want  string
	}{
		{
			name: "range last single point start time",
			data: []string{
				"m,tag=a f=1i 1",
			},
			query: `
option profiler.enabledProfilers = ["query0", "query100", "query100", "NonExistentProfiler"]
from(bucket: v.bucket)
	|> range(start: 1970-01-01T00:00:00.000000001Z, stop: 1970-01-01T01:00:00Z)
	|> last()
`,
			want: `
#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string
#group,false,false,true,true,false,false,true,true,true
#default,_result,,,,,,,,
,result,table,_start,_stop,_time,_value,_field,_measurement,tag
,,0,1970-01-01T00:00:00.000000001Z,1970-01-01T01:00:00Z,1970-01-01T00:00:00.000000001Z,1,f,m,a

#datatype,string,long,string,long,long,long,long,long,long,long,long,long,string,string,long,long
#group,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false
#default,_profiler,,,,,,,,,,,,,,,
,result,table,_measurement,TotalDuration,CompileDuration,QueueDuration,PlanDuration,RequeueDuration,ExecuteDuration,Concurrency,MaxAllocated,TotalAllocated,RuntimeErrors,flux/query-plan,influxdb/scanned-bytes,influxdb/scanned-values
,,0,profiler/query0,0,1,2,3,4,5,6,7,8,"error1
error2","query plan",9,10
,,1,profiler/query100,100,101,102,103,104,105,106,107,108,"error1
error2","query plan",109,110
`,
		},
	}
	execute.RegisterProfilers(&TestQueryProfiler{}, &TestQueryProfiler{start: 100})
	for _, tc := range testcases {
		tc := tc // capture the range variable for the parallel-safe closure
		t.Run(tc.name, func(t *testing.T) {
			// Boot a full test launcher, write the fixture points, then run
			// the profiled query through the flux query service.
			l := launcher.RunTestLauncherOrFail(t, ctx, nil)
			l.SetupOrFail(t)
			defer l.ShutdownOrFail(t, ctx)
			l.WritePointsOrFail(t, strings.Join(tc.data, "\n"))
			// Prepend the profiler import and a `v` record holding the real
			// bucket name before the test-case query text.
			queryStr := "import \"profiler\"\nv = {bucket: " + "\"" + l.Bucket.Name + "\"" + "}\n" + tc.query
			req := &query.Request{
				Authorization:  l.Auth,
				OrganizationID: l.Org.ID,
				Compiler: lang.FluxCompiler{
					Query: queryStr,
				},
			}
			if got, err := l.FluxQueryService().Query(ctx, req); err != nil {
				t.Error(err)
			} else {
				// Decode the expected annotated CSV and compare both result
				// iterators table-by-table.
				dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
				want, err := dec.Decode(ioutil.NopCloser(strings.NewReader(tc.want)))
				if err != nil {
					t.Fatal(err)
				}
				defer want.Release()
				if err := executetest.EqualResultIterators(want, got); err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}
func TestQueryPushDowns(t *testing.T) {
t.Skip("Not supported yet")
testcases := []struct {
name string
data []string

View File

@ -4,16 +4,12 @@ import (
"fmt"
"io/ioutil"
nethttp "net/http"
"path/filepath"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
"github.com/influxdata/influxdb/v2/http"
"github.com/influxdata/influxdb/v2/toml"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
)
func TestStorage_WriteAndQuery(t *testing.T) {
@ -130,7 +126,7 @@ func TestLauncher_BucketDelete(t *testing.T) {
// Verify the cardinality in the engine.
engine := l.Launcher.Engine()
if got, exp := engine.SeriesCardinality(), int64(1); got != exp {
if got, exp := engine.SeriesCardinality(l.Org.ID, l.Bucket.ID), int64(1); got != exp {
t.Fatalf("got %d, exp %d", got, exp)
}
@ -152,98 +148,7 @@ func TestLauncher_BucketDelete(t *testing.T) {
}
// Verify that the data has been removed from the storage engine.
if got, exp := engine.SeriesCardinality(), int64(0); got != exp {
if got, exp := engine.SeriesCardinality(l.Org.ID, l.Bucket.ID), int64(0); got != exp {
t.Fatalf("after bucket delete got %d, exp %d", got, exp)
}
}
// TestStorage_CacheSnapshot_Size verifies that the engine snapshots its cache
// to TSM when the cache exceeds the configured memory threshold: with a tiny
// 10-byte SnapshotMemorySize and a one-hour age limit, five written series
// must appear in TSM files shortly after the writes.
func TestStorage_CacheSnapshot_Size(t *testing.T) {
	l := launcher.NewTestLauncher(nil)
	// Force size-triggered snapshots: the memory threshold is tiny while the
	// age threshold is far in the future.
	l.StorageConfig.Engine.Cache.SnapshotMemorySize = 10
	l.StorageConfig.Engine.Cache.SnapshotAgeDuration = toml.Duration(time.Hour)
	defer l.ShutdownOrFail(t, ctx)

	if err := l.Run(ctx); err != nil {
		t.Fatal(err)
	}
	l.SetupOrFail(t)

	org1 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{
		User:     "USER-1",
		Password: "PASSWORD-1",
		Org:      "ORG-01",
		Bucket:   "BUCKET",
	})

	// Write five distinct series (unique k tag values) against the server.
	l.WriteOrFail(t, org1, `m,k=v1 f=100i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v2 f=101i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v3 f=102i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v4 f=103i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v5 f=104i 946684800000000000`)

	// Wait for the cache to snapshot; five seconds is a generous window for
	// the size-triggered snapshot to land on disk.
	time.Sleep(time.Second * 5)

	// Inspect the TSM data directory directly to count persisted series.
	report := tsm1.Report{
		Dir:   filepath.Join(l.Path, "/engine/data"),
		Exact: true,
	}
	summary, err := report.Run(false)
	if err != nil {
		t.Fatal(err)
	}

	// All five series should have been flushed into the snapshot.
	if got, exp := summary.Total, uint64(5); got != exp {
		t.Fatalf("got %d series in TSM files, expected %d", got, exp)
	}
}
// TestStorage_CacheSnapshot_Age verifies that the engine snapshots its cache
// to TSM once the cache reaches the configured age: with a one-second
// SnapshotAgeDuration, five written series must appear in TSM files shortly
// after the writes.
func TestStorage_CacheSnapshot_Age(t *testing.T) {
	l := launcher.NewTestLauncher(nil)
	// Force age-triggered snapshots after one second.
	l.StorageConfig.Engine.Cache.SnapshotAgeDuration = toml.Duration(time.Second)
	defer l.ShutdownOrFail(t, ctx)

	if err := l.Run(ctx); err != nil {
		t.Fatal(err)
	}
	l.SetupOrFail(t)

	org1 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{
		User:     "USER-1",
		Password: "PASSWORD-1",
		Org:      "ORG-01",
		Bucket:   "BUCKET",
	})

	// Write five distinct series (unique k tag values) against the server.
	l.WriteOrFail(t, org1, `m,k=v1 f=100i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v2 f=101i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v3 f=102i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v4 f=102i 946684800000000000`)
	l.WriteOrFail(t, org1, `m,k=v5 f=102i 946684800000000000`)

	// Wait for the cache to snapshot; five seconds comfortably exceeds the
	// one-second age trigger.
	time.Sleep(time.Second * 5)

	// Inspect the TSM data directory directly to count persisted series.
	report := tsm1.Report{
		Dir:   filepath.Join(l.Path, "/engine/data"),
		Exact: true,
	}
	summary, err := report.Run(false)
	if err != nil {
		t.Fatal(err)
	}

	// All five series should have been flushed into the snapshot.
	if got, exp := summary.Total, uint64(5); got != exp {
		t.Fatalf("got %d series in TSM files, expected %d", got, exp)
	}
}

View File

@ -9,12 +9,9 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/cmd/influxd/generate"
"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
"github.com/influxdata/influxdb/v2/cmd/influxd/restore"
_ "github.com/influxdata/influxdb/v2/query/builtin"
_ "github.com/influxdata/influxdb/v2/tsdb/tsi1"
_ "github.com/influxdata/influxdb/v2/tsdb/tsm1"
_ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
_ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1"
"github.com/spf13/cobra"
)
@ -32,8 +29,9 @@ func main() {
influxdb.SetBuildInfo(version, commit, date)
rootCmd := launcher.NewInfluxdCommand(context.Background(),
generate.Command,
restore.Command,
// FIXME
//generate.Command,
//restore.Command,
&cobra.Command{
Use: "version",
Short: "Print the influxd server version",

View File

@ -8,10 +8,8 @@ import (
"strings"
"github.com/influxdata/influxdb/v2/bolt"
"github.com/influxdata/influxdb/v2/cmd/influxd/inspect"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/kit/cli"
"github.com/influxdata/influxdb/v2/storage"
"github.com/spf13/cobra"
)
@ -123,12 +121,14 @@ func restoreE(cmd *cobra.Command, args []string) error {
}
if flags.rebuildTSI {
sFilePath := filepath.Join(flags.enginePath, storage.DefaultSeriesFileDirectoryName)
indexPath := filepath.Join(flags.enginePath, storage.DefaultIndexDirectoryName)
// FIXME: Implement rebuildTSI
panic("not implemented")
//sFilePath := filepath.Join(flags.enginePath, storage.DefaultSeriesFileDirectoryName)
//indexPath := filepath.Join(flags.enginePath, storage.DefaultIndexDirectoryName)
rebuild := inspect.NewBuildTSICommand()
rebuild.SetArgs([]string{"--sfile-path", sFilePath, "--tsi-path", indexPath})
rebuild.Execute()
//rebuild := inspect.NewBuildTSICommand()
//rebuild.SetArgs([]string{"--sfile-path", sFilePath, "--tsi-path", indexPath})
//rebuild.Execute()
}
if err := removeTmpBolt(); err != nil {

View File

@ -30,7 +30,7 @@ func NewTestBoltStore(t *testing.T) (kv.Store, func(), error) {
ctx := context.Background()
logger := zaptest.NewLogger(t)
path := f.Name()
s := bolt.NewKVStore(logger, path)
s := bolt.NewKVStore(logger, path, bolt.WithNoSync)
if err := s.Open(context.Background()); err != nil {
return nil, nil, err
}

View File

@ -11,10 +11,5 @@ type Predicate interface {
// DeleteService will delete a bucket from the range and predict.
type DeleteService interface {
DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID ID, min, max int64, pred Predicate, opts DeletePrefixRangeOptions) error
}
type DeletePrefixRangeOptions struct {
// If true, does not delete underlying series when all data has been deleted.
KeepSeries bool
DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID ID, min, max int64, pred Predicate) error
}

View File

@ -138,9 +138,3 @@
key: enforceOrgDashboardLimits
default: false
contact: Compute Team
- name: Inject Latest Success Time
description: Inject the latest successful task run timestamp into a Task query extern when executing.
key: injectLatestSuccessTime
default: false
contact: Compute Team

View File

@ -6,7 +6,6 @@ import (
"github.com/influxdata/influxdb/v2/nats"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/tsdb"
"go.uber.org/zap"
)
@ -21,12 +20,8 @@ func (s PointWriter) Record(collected MetricsCollection) error {
if err != nil {
return err
}
ps, err = tsdb.ExplodePoints(collected.OrgID, collected.BucketID, ps)
if err != nil {
return err
}
return s.Writer.WritePoints(context.TODO(), ps)
return s.Writer.WritePoints(context.TODO(), 0, 0, ps)
}
// Recorder record the metrics of a time based.

8
go.mod
View File

@ -5,6 +5,7 @@ go 1.13
require (
cloud.google.com/go/bigtable v1.3.0 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/DATA-DOG/go-sqlmock v1.4.1 // indirect
github.com/NYTimes/gziphandler v1.0.1
github.com/RoaringBitmap/roaring v0.4.16
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883
@ -29,6 +30,7 @@ require (
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect
github.com/go-chi/chi v4.1.0+incompatible
github.com/go-sql-driver/mysql v1.5.0 // indirect
github.com/go-stack/stack v1.8.0
github.com/gogo/protobuf v1.3.1
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021
@ -47,7 +49,7 @@ require (
github.com/hashicorp/vault/api v1.0.2
github.com/imdario/mergo v0.3.9 // indirect
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6
github.com/influxdata/flux v0.82.2
github.com/influxdata/flux v0.66.1
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69
github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6
github.com/influxdata/pkg-config v0.2.3
@ -86,18 +88,20 @@ require (
github.com/stretchr/testify v1.5.1
github.com/tcnksm/go-input v0.0.0-20180404061846-548a7d7a8ee8
github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72
github.com/tinylib/msgp v1.1.0 // indirect
github.com/tinylib/msgp v1.1.0
github.com/tylerb/graceful v1.2.15
github.com/uber-go/atomic v1.3.2 // indirect
github.com/uber/jaeger-client-go v2.16.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/willf/bitset v1.1.9 // indirect
github.com/xlab/treeprint v1.0.0
github.com/yudai/gojsondiff v1.0.0
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
github.com/yudai/pp v2.0.1+incompatible // indirect
go.uber.org/multierr v1.5.0
go.uber.org/zap v1.14.1
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e

57
go.sum
View File

@ -29,34 +29,10 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.10.1 h1:uaB8A32IZU9YKs9v50+/LWIWTDHJk2vlGzbfd7FfESI=
github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
@ -92,8 +68,6 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.29.16 h1:Gbtod7Y4W/Ai7wPtesdvgGVTkFN8JxAaGouRLlcQfQs=
github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3 h1:wOysYcIdqv3WnvwqFFzrYCFALPED7qkUGaLXu359GSc=
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE=
github.com/benbjohnson/tmpl v1.0.0 h1:T5QPGJD0W6JJxyEEAlVnX3co/IkUrfHen1/42nlgAHo=
@ -113,7 +87,6 @@ github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e/go.mod h1:errmMKH8tTB49UR2A8C8DPYkyudelsYJwJFaZHQ6ik8=
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
@ -143,15 +116,11 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg=
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20180815000130-e05b657120a6/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@ -202,6 +171,7 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
@ -215,8 +185,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 h1:HYV500jCgk+IC68L5sWrLFIWMpaUFfXXpJSAb7XOoBk=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
@ -350,8 +318,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
github.com/influxdata/flux v0.82.2 h1:VtoF8pbyoS+3QLQQmihSmV0Ly6g/A73x+3VBUp9t15g=
github.com/influxdata/flux v0.82.2/go.mod h1:sAAIEgQTlTpsXCUQ49ymoRsKqraPzIb7F3paT72/lE0=
github.com/influxdata/flux v0.66.1 h1:d98L5k9mmP7bU7d2zAx6C3dCe5B8/PEa1wkWzZAE+Ok=
github.com/influxdata/flux v0.66.1/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6 h1:CFx+pP90q/qg3spoiZjf8donE4WpAdjeJfPOcoNqkWo=
@ -368,8 +336,6 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaF
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@ -498,8 +464,6 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
@ -573,8 +537,6 @@ github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbd
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/snowflakedb/gosnowflake v1.3.4 h1:Gyoi6g4lMHsilEwW9+KV+bgYkJTgf5pVfvL7Utus920=
github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@ -623,10 +585,6 @@ github.com/tylerb/graceful v1.2.15 h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83o
github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II=
github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo=
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg=
github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
github.com/uber/athenadriver v1.1.4 h1:k6k0RBeXjR7oZ8NO557MsRw3eX1cc/9B0GNx+W9eHiQ=
github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
@ -636,6 +594,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/willf/bitset v1.1.9 h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs=
github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
@ -664,7 +624,6 @@ go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
@ -675,7 +634,6 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -684,13 +642,11 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -978,7 +934,6 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
istio.io/api v0.0.0-20190515205759-982e5c3888c6/go.mod h1:hhLFQmpHia8zgaM37vb2ml9iS5NfNfqZGRt1pS9aVEo=

View File

@ -9,10 +9,10 @@ import (
"github.com/influxdata/influxdb/v2/chronograf/server"
"github.com/influxdata/influxdb/v2/dbrp"
"github.com/influxdata/influxdb/v2/http/metric"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/feature"
"github.com/influxdata/influxdb/v2/kit/prom"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/storage"
"github.com/prometheus/client_golang/prometheus"
@ -60,6 +60,7 @@ type APIBackend struct {
BackupService influxdb.BackupService
KVBackupService influxdb.KVBackupService
AuthorizationService influxdb.AuthorizationService
OnboardingService influxdb.OnboardingService
DBRPService influxdb.DBRPMappingServiceV2
BucketService influxdb.BucketService
SessionService influxdb.SessionService
@ -76,6 +77,7 @@ type APIBackend struct {
VariableService influxdb.VariableService
PasswordsService influxdb.PasswordsService
InfluxQLService query.ProxyQueryService
InfluxqldService influxql.ProxyQueryService
FluxService query.ProxyQueryService
FluxLanguageService influxdb.FluxLanguageService
TaskService influxdb.TaskService
@ -199,11 +201,11 @@ func NewAPIHandler(b *APIBackend, opts ...APIHandlerOptFn) *APIHandler {
writeBackend := NewWriteBackend(b.Logger.With(zap.String("handler", "write")), b)
h.Mount(prefixWrite, NewWriteHandler(b.Logger, writeBackend,
WithMaxBatchSizeBytes(b.MaxBatchSizeBytes),
WithParserOptions(
models.WithParserMaxBytes(b.WriteParserMaxBytes),
models.WithParserMaxLines(b.WriteParserMaxLines),
models.WithParserMaxValues(b.WriteParserMaxValues),
),
//WithParserOptions(
// models.WithParserMaxBytes(b.WriteParserMaxBytes),
// models.WithParserMaxLines(b.WriteParserMaxLines),
// models.WithParserMaxValues(b.WriteParserMaxValues),
//),
))
for _, o := range opts {

View File

@ -13,7 +13,6 @@ import (
pcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/predicate"
"github.com/influxdata/influxdb/v2/tsdb/tsm1"
"go.uber.org/zap"
)
@ -122,7 +121,6 @@ func (h *DeleteHandler) handleDelete(w http.ResponseWriter, r *http.Request) {
dr.Start,
dr.Stop,
dr.Predicate,
influxdb.DeletePrefixRangeOptions{KeepSeries: dr.KeepSeries},
)
if err != nil {
h.HandleHTTPError(ctx, err, w)
@ -157,33 +155,28 @@ func decodeDeleteRequest(ctx context.Context, r *http.Request, orgSvc influxdb.O
}
type deleteRequest struct {
Org *influxdb.Organization
Bucket *influxdb.Bucket
Start int64
Stop int64
Predicate influxdb.Predicate
KeepSeries bool
Org *influxdb.Organization
Bucket *influxdb.Bucket
Start int64
Stop int64
Predicate influxdb.Predicate
}
type deleteRequestDecode struct {
Start string `json:"start"`
Stop string `json:"stop"`
Predicate string `json:"predicate"`
PredicateBytes []byte `json:"predicate_bytes"`
KeepSeries bool `json:"keep_series"`
Start string `json:"start"`
Stop string `json:"stop"`
Predicate string `json:"predicate"`
}
// DeleteRequest is the request send over http to delete points.
type DeleteRequest struct {
OrgID string `json:"-"`
Org string `json:"-"` // org name
BucketID string `json:"-"`
Bucket string `json:"-"`
Start string `json:"start"`
Stop string `json:"stop"`
Predicate string `json:"predicate"`
PredicateBytes []byte `json:"predicate_bytes"`
KeepSeries bool `json:"keep_series"`
OrgID string `json:"-"`
Org string `json:"-"` // org name
BucketID string `json:"-"`
Bucket string `json:"-"`
Start string `json:"start"`
Stop string `json:"stop"`
Predicate string `json:"predicate"`
}
func (dr *deleteRequest) UnmarshalJSON(b []byte) error {
@ -195,8 +188,7 @@ func (dr *deleteRequest) UnmarshalJSON(b []byte) error {
Err: err,
}
}
*dr = deleteRequest{KeepSeries: drd.KeepSeries}
*dr = deleteRequest{}
start, err := time.Parse(time.RFC3339Nano, drd.Start)
if err != nil {
return &influxdb.Error{
@ -216,22 +208,12 @@ func (dr *deleteRequest) UnmarshalJSON(b []byte) error {
}
}
dr.Stop = stop.UnixNano()
if len(drd.PredicateBytes) != 0 {
if dr.Predicate, err = tsm1.UnmarshalPredicate(drd.PredicateBytes); err != nil {
return err
}
} else {
node, err := predicate.Parse(drd.Predicate)
if err != nil {
return err
}
if dr.Predicate, err = predicate.New(node); err != nil {
return err
}
node, err := predicate.Parse(drd.Predicate)
if err != nil {
return err
}
return nil
dr.Predicate, err = predicate.New(node)
return err
}
// DeleteService sends data over HTTP to delete points.

39
http/legacy.go Normal file
View File

@ -0,0 +1,39 @@
package http
import (
"github.com/influxdata/influxdb/v2/http/legacy"
)
// newLegacyBackend constructs a legacy backend from an api backend, copying
// across every service the 1.x compatibility handlers require.
func newLegacyBackend(b *APIBackend) *legacy.Backend {
	backend := new(legacy.Backend)
	backend.HTTPErrorHandler = b.HTTPErrorHandler
	backend.Logger = b.Logger
	// TODO(sgc): /write support
	//backend.MaxBatchSizeBytes = b.MaxBatchSizeBytes
	backend.AuthorizationService = b.AuthorizationService
	backend.OrganizationService = b.OrganizationService
	backend.BucketService = b.BucketService
	backend.PointsWriter = b.PointsWriter
	backend.DBRPMappingServiceV2 = b.DBRPService
	backend.ProxyQueryService = b.InfluxQLService
	backend.InfluxqldQueryService = b.InfluxqldService
	backend.WriteEventRecorder = b.WriteEventRecorder
	return backend
}
// newLegacyHandler constructs a legacy handler from a backend, wiring the
// /write, /query and /ping sub-handlers.
func newLegacyHandler(b *legacy.Backend, config legacy.HandlerConfig) *legacy.Handler {
	// Build the points-writer pipeline for /write.
	writeBackend := legacy.NewPointsWriterBackend(b)
	writeHandler := legacy.NewWriterHandler(writeBackend, legacy.WithMaxBatchSizeBytes(b.MaxBatchSizeBytes))

	// Build the InfluxQL pipeline for /query.
	queryBackend := legacy.NewInfluxQLBackend(b)
	queryHandler := legacy.NewInfluxQLHandler(queryBackend, config)

	return &legacy.Handler{
		HTTPErrorHandler:    b.HTTPErrorHandler,
		PointsWriterHandler: writeHandler,
		InfluxQLHandler:     queryHandler,
		PingHandler:         legacy.NewPingHandler(config.Version),
	}
}

83
http/legacy/backend.go Normal file
View File

@ -0,0 +1,83 @@
package legacy
import (
http2 "net/http"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/http/metric"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/cli"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/storage"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
// Handler is a collection of all the service handlers.
// It routes each supported 1.x endpoint to its dedicated sub-handler
// (see ServeHTTP below).
type Handler struct {
	influxdb.HTTPErrorHandler

	// PointsWriterHandler serves the 1.x /write endpoint.
	PointsWriterHandler *WriteHandler
	// PingHandler serves the 1.x /ping endpoint.
	PingHandler *PingHandler
	// InfluxQLHandler serves the 1.x /query endpoint.
	InfluxQLHandler *InfluxqlHandler
}
// Backend collects the services required to construct the legacy
// (InfluxDB 1.x compatibility) HTTP handlers.
type Backend struct {
	influxdb.HTTPErrorHandler
	Logger *zap.Logger
	// MaxBatchSizeBytes caps the size of a /write batch (passed to the
	// writer handler via WithMaxBatchSizeBytes in newLegacyHandler).
	MaxBatchSizeBytes int64
	WriteEventRecorder metric.EventRecorder

	AuthorizationService  influxdb.AuthorizationService
	OrganizationService   influxdb.OrganizationService
	BucketService         influxdb.BucketService
	PointsWriter          storage.PointsWriter
	DBRPMappingServiceV2  influxdb.DBRPMappingServiceV2
	ProxyQueryService     query.ProxyQueryService
	InfluxqldQueryService influxql.ProxyQueryService
}
// HandlerConfig provides configuration for the legacy handler.
type HandlerConfig struct {
	// Version is the build version reported by the /ping handler.
	Version string
	// DefaultRoutingKey is the default routing key for publishing new
	// query requests (settable via the CLI flag registered in Opts).
	DefaultRoutingKey string
}
// NewHandlerConfig returns a zero-valued HandlerConfig; fields are
// populated later, e.g. through the CLI options returned by Opts.
func NewHandlerConfig() *HandlerConfig {
	var config HandlerConfig
	return &config
}
// Opts returns the CLI options for use with kit/cli.
// Currently set values on c are provided as the defaults.
func (c *HandlerConfig) Opts() []cli.Opt {
	opts := make([]cli.Opt, 0, 1)
	opts = append(opts, cli.Opt{
		DestP:   &c.DefaultRoutingKey,
		Flag:    "influxql-default-routing-key",
		Default: "defaultQueue",
		Desc:    "Default routing key for publishing new query requests",
	})
	return opts
}
// ServeHTTP dispatches the supported 1.x endpoints to their sub-handlers;
// any other path yields 404.
func (h *Handler) ServeHTTP(w http2.ResponseWriter, r *http2.Request) {
	switch r.URL.Path {
	case "/write":
		h.PointsWriterHandler.ServeHTTP(w, r)
	case "/ping":
		h.PingHandler.ServeHTTP(w, r)
	case "/query":
		h.InfluxQLHandler.ServeHTTP(w, r)
	default:
		w.WriteHeader(http2.StatusNotFound)
	}
}
// PrometheusCollectors exposes the metrics gathered by the InfluxQL
// sub-handler; the write and ping handlers contribute none here.
func (h *Handler) PrometheusCollectors() []prometheus.Collector {
	return h.InfluxQLHandler.PrometheusCollectors()
}

27
http/legacy/common.go Normal file
View File

@ -0,0 +1,27 @@
package legacy
import (
"context"
"github.com/influxdata/influxdb/v2"
pcontext "github.com/influxdata/influxdb/v2/context"
)
// getAuthorization extracts authorization information from a context.Context.
// It returns an EForbidden error when the context's authorizer is not an
// *influxdb.Authorization (for example a session), which is not supported
// by the legacy endpoints.
func getAuthorization(ctx context.Context) (*influxdb.Authorization, error) {
	authorizer, err := pcontext.GetAuthorizer(ctx)
	if err != nil {
		return nil, err
	}

	switch a := authorizer.(type) {
	case *influxdb.Authorization:
		return a, nil
	default:
		return nil, &influxdb.Error{
			Code: influxdb.EForbidden,
			Msg:  "insufficient permissions; session not supported",
		}
	}
}

View File

@ -0,0 +1,175 @@
package legacy
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/influxdata/influxdb/v2"
platcontext "github.com/influxdata/influxdb/v2/context"
"github.com/opentracing/opentracing-go"
)
// Influx1xAuthenticationHandler is HTTP middleware that authenticates
// InfluxDB 1.x style credentials (basic auth, u/p query params, or a
// Token header) before invoking the wrapped handler.
type Influx1xAuthenticationHandler struct {
	influxdb.HTTPErrorHandler
	// next is invoked once authentication succeeds (and directly for /ping).
	next http.Handler
	// auth resolves tokens to authorizations.
	auth influxdb.AuthorizationService
	// user resolves usernames and user IDs to users.
	user influxdb.UserService
}
// NewInflux1xAuthenticationHandler creates an authentication handler to process
// InfluxDB 1.x authentication requests.
func NewInflux1xAuthenticationHandler(next http.Handler, auth influxdb.AuthorizationService, user influxdb.UserService, h influxdb.HTTPErrorHandler) *Influx1xAuthenticationHandler {
	handler := new(Influx1xAuthenticationHandler)
	handler.HTTPErrorHandler = h
	handler.next = next
	handler.auth = auth
	handler.user = user
	return handler
}
// ServeHTTP extracts the session or token from the http request and places the resulting authorizer on the request context.
//
// Failure modes: missing/unparseable credentials or an unknown token yield
// an unauthorized (401) response; a username that does not own the token
// or an inactive user yields a forbidden (403) response.
func (h *Influx1xAuthenticationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// The ping endpoint does not need authorization
	if r.URL.Path == "/ping" {
		h.next.ServeHTTP(w, r)
		return
	}
	ctx := r.Context()

	creds, err := h.parseCredentials(r)
	if err != nil {
		unauthorizedError(ctx, h, w)
		return
	}

	// The token must resolve to an existing authorization.
	auth, err := h.auth.FindAuthorizationByToken(ctx, creds.Token)
	if err != nil {
		unauthorizedError(ctx, h, w)
		return
	}

	var user *influxdb.User
	if creds.Username != "" {
		// A username was supplied alongside the token; it must name the
		// same user that owns the authorization.
		user, err = h.user.FindUser(ctx, influxdb.UserFilter{Name: &creds.Username})
		if err != nil {
			unauthorizedError(ctx, h, w)
			return
		}

		if user.ID != auth.UserID {
			h.HandleHTTPError(ctx, &influxdb.Error{
				Code: influxdb.EForbidden,
				Msg:  "Username and Token do not match",
			}, w)
			return
		}
	} else {
		// Token-only request: load the owning user to check its status.
		user, err = h.user.FindUserByID(ctx, auth.UserID)
		if err != nil {
			unauthorizedError(ctx, h, w)
			return
		}
	}

	if err = h.isUserActive(user); err != nil {
		inactiveUserError(ctx, h, w)
		return
	}

	// Make the authorization available to downstream handlers.
	ctx = platcontext.SetAuthorizer(ctx, auth)

	if span := opentracing.SpanFromContext(ctx); span != nil {
		span.SetTag("user_id", auth.GetUserID().String())
	}

	h.next.ServeHTTP(w, r.WithContext(ctx))
}
// isUserActive returns nil when the user may authenticate, or an
// EForbidden error when the user's status is "inactive".
func (h *Influx1xAuthenticationHandler) isUserActive(u *influxdb.User) error {
	if u.Status == "inactive" {
		return &influxdb.Error{Code: influxdb.EForbidden, Msg: "User is inactive"}
	}
	return nil
}
// credentials carries the username/token pair extracted from a 1.x request.
// Username may be empty when only a bare token was supplied (see parseToken).
type credentials struct {
	Username string
	Token    string
}
// parseToken splits a 1.x token value on its first colon. A bare value
// ("Token <token>") yields an empty user; "Token <username>:<token>"
// yields both parts. ok is always true.
func parseToken(token string) (user, pass string, ok bool) {
	parts := strings.SplitN(token, ":", 2)
	if len(parts) == 2 {
		// Token <username>:<token>
		return parts[0], parts[1], true
	}
	// Token <token>
	return "", token, true
}
// parseCredentials parses a request and returns the authentication credentials.
// The credentials may be present as URL query params, or as a Basic
// Authentication header.
// As params: http://127.0.0.1/query?u=username&p=token
// As basic auth: http://username:token@127.0.0.1
// As Token in Authorization header: Token <username:token>
func (h *Influx1xAuthenticationHandler) parseCredentials(r *http.Request) (*credentials, error) {
	// URL query parameters take precedence: ?u=<user>&p=<token>.
	q := r.URL.Query()
	if u, p := q.Get("u"), q.Get("p"); u != "" && p != "" {
		return &credentials{Username: u, Token: p}, nil
	}

	if header := r.Header.Get("Authorization"); header != "" {
		// "Token <token>" or "Token <username>:<token>".
		if fields := strings.Split(header, " "); len(fields) == 2 && fields[0] == "Token" {
			if u, p, ok := parseToken(fields[1]); ok {
				return &credentials{Username: u, Token: p}, nil
			}
			// fallback to only a token
		}

		// Fall back to HTTP Basic Authentication.
		if u, p, ok := r.BasicAuth(); ok {
			return &credentials{Username: u, Token: p}, nil
		}
	}

	return nil, fmt.Errorf("unable to parse authentication credentials")
}
// unauthorizedError encodes an error message and status code for
// unauthorized access (influxdb.EUnauthorized).
func unauthorizedError(ctx context.Context, h influxdb.HTTPErrorHandler, w http.ResponseWriter) {
	h.HandleHTTPError(ctx, &influxdb.Error{
		Code: influxdb.EUnauthorized,
		Msg:  "unauthorized access",
	}, w)
}
// inactiveUserError encodes an error message and status code for inactive
// users (influxdb.EForbidden).
func inactiveUserError(ctx context.Context, h influxdb.HTTPErrorHandler, w http.ResponseWriter) {
	h.HandleHTTPError(ctx, &influxdb.Error{
		Code: influxdb.EForbidden,
		Msg:  "User is inactive",
	}, w)
}

View File

@ -0,0 +1,198 @@
package legacy
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/v2"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/mock"
)
const tokenScheme = "Token " // TODO(goller): I'd like this to be Bearer

// setToken attaches token to req's Authorization header using the 1.x
// "Token " scheme.
func setToken(token string, req *http.Request) {
	req.Header.Set("Authorization", tokenScheme+token)
}
// TestInflux1xAuthenticationHandler exercises the 1.x authentication
// middleware against each supported credential style (basic auth, u/p
// query params, Token header with and without a username) and the
// failure paths (unknown token, inactive user, user/token mismatch,
// missing credentials).
func TestInflux1xAuthenticationHandler(t *testing.T) {
	var one = influxdb.ID(1)

	// fields configures the mock authorization and user services for a
	// case; nil hooks fall back to the permissive defaults installed in
	// the run loop below.
	type fields struct {
		FindAuthorizationByTokenFn func(context.Context, string) (*influxdb.Authorization, error)
		FindUserFn                 func(context.Context, influxdb.UserFilter) (*influxdb.User, error)
		FindUserByIDFn             func(context.Context, influxdb.ID) (*influxdb.User, error)
	}

	// exp holds the expected HTTP status code of the response.
	type exp struct {
		code int
	}

	// Credential injectors: each returns a function that attaches the
	// username/token pair to a request in one of the supported styles.
	basic := func(u, p string) func(r *http.Request) {
		return func(r *http.Request) {
			r.SetBasicAuth(u, p)
		}
	}

	token := func(u, p string) func(r *http.Request) {
		return func(r *http.Request) {
			if u == "" {
				setToken(p, r)
			} else {
				setToken(u+":"+p, r)
			}
		}
	}

	query := func(u, p string) func(r *http.Request) {
		return func(r *http.Request) {
			v := r.URL.Query()
			v.Add("u", u)
			v.Add("p", p)
			r.URL.RawQuery = v.Encode()
		}
	}

	const (
		User  = "sydney"
		Token = "my-token"
	)

	tests := []struct {
		name   string
		fields fields
		auth   func(r *http.Request)
		exp    exp
	}{
		// successful requests
		{
			name:   "basic auth",
			fields: fields{},
			auth:   basic(User, Token),
			exp: exp{
				code: http.StatusOK,
			},
		},
		{
			name:   "query string",
			fields: fields{},
			auth:   query(User, Token),
			exp: exp{
				code: http.StatusOK,
			},
		},
		{
			name:   "Token as user:token",
			fields: fields{},
			auth:   token(User, Token),
			exp: exp{
				code: http.StatusOK,
			},
		},
		{
			name:   "Token as token",
			fields: fields{},
			auth:   token("", Token),
			exp: exp{
				code: http.StatusOK,
			},
		},
		// failure paths
		{
			name: "token does not exist",
			fields: fields{
				FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
					return nil, fmt.Errorf("authorization not found")
				},
			},
			exp: exp{
				code: http.StatusUnauthorized,
			},
		},
		{
			name: "user is inactive",
			fields: fields{
				FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
					return &influxdb.Authorization{UserID: one}, nil
				},
				FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) {
					return &influxdb.User{ID: one, Status: "inactive"}, nil
				},
			},
			auth: basic(User, Token),
			exp: exp{
				code: http.StatusForbidden,
			},
		},
		{
			name: "username and token mismatch",
			fields: fields{
				FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
					return &influxdb.Authorization{UserID: one}, nil
				},
				FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) {
					return &influxdb.User{ID: influxdb.ID(2)}, nil
				},
			},
			auth: basic(User, Token),
			exp: exp{
				code: http.StatusForbidden,
			},
		},
		{
			name: "no auth provided",
			fields: fields{
				FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
					return &influxdb.Authorization{}, nil
				},
			},
			exp: exp{
				code: http.StatusUnauthorized,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var h *Influx1xAuthenticationHandler
			{
				// Install permissive defaults for any mock hooks the case
				// left nil so only the behavior under test varies.
				auth := &mock.AuthorizationService{FindAuthorizationByTokenFn: tt.fields.FindAuthorizationByTokenFn}
				if auth.FindAuthorizationByTokenFn == nil {
					auth.FindAuthorizationByTokenFn = func(ctx context.Context, token string) (*influxdb.Authorization, error) {
						return &influxdb.Authorization{UserID: one}, nil
					}
				}

				user := &mock.UserService{FindUserFn: tt.fields.FindUserFn, FindUserByIDFn: tt.fields.FindUserByIDFn}
				if user.FindUserFn == nil {
					user.FindUserFn = func(context.Context, influxdb.UserFilter) (*influxdb.User, error) {
						return &influxdb.User{ID: one}, nil
					}
				}
				if user.FindUserByIDFn == nil {
					user.FindUserByIDFn = func(_ context.Context, id influxdb.ID) (*influxdb.User, error) {
						return &influxdb.User{ID: id}, nil
					}
				}

				// The wrapped handler simply reports success; a 200 means
				// authentication passed.
				next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(http.StatusOK)
				})

				h = NewInflux1xAuthenticationHandler(next, auth, user, kithttp.ErrorHandler(0))
			}

			w := httptest.NewRecorder()
			r := httptest.NewRequest("POST", "http://any.url", nil)
			if tt.auth != nil {
				tt.auth(r)
			}

			h.ServeHTTP(w, r)

			if got, want := w.Code, tt.exp.code; got != want {
				t.Errorf("expected status code to be %d got %d", want, got)
			}
		})
	}
}

View File

@ -0,0 +1,56 @@
package legacy
import (
"net/http"
platform "github.com/influxdata/influxdb/v2"
influxqld "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/control"
"github.com/influxdata/influxdb/v2/query"
"go.uber.org/zap"
)
// InfluxqlHandler mimics the /query handler from InfluxDB 1.x, but enriches
// requests with the organization and forwards them to the transpiler service.
type InfluxqlHandler struct {
	*InfluxQLBackend
	HandlerConfig
	// Metrics tracks request counts and latencies; exposed via
	// PrometheusCollectors.
	Metrics *control.ControllerMetrics
}
// InfluxQLBackend is the collection of services used to serve InfluxQL
// (1.x compatibility) queries.
type InfluxQLBackend struct {
	platform.HTTPErrorHandler
	Logger               *zap.Logger
	AuthorizationService platform.AuthorizationService
	OrganizationService  platform.OrganizationService
	// NOTE(review): ProxyQueryService is not populated by
	// NewInfluxQLBackend — confirm whether this field is still required.
	ProxyQueryService     query.ProxyQueryService
	InfluxqldQueryService influxqld.ProxyQueryService
}
// NewInfluxQLBackend constructs an InfluxQLBackend from a LegacyBackend.
// Only the influxqld query service is wired here; the ProxyQueryService
// field is intentionally left unset.
func NewInfluxQLBackend(b *Backend) *InfluxQLBackend {
	return &InfluxQLBackend{
		HTTPErrorHandler:      b.HTTPErrorHandler,
		Logger:                b.Logger.With(zap.String("handler", "influxql")),
		AuthorizationService:  b.AuthorizationService,
		OrganizationService:   b.OrganizationService,
		InfluxqldQueryService: b.InfluxqldQueryService,
	}
}
// NewInfluxQLHandler returns a new instance of InfluxqlHandler to handle influxql v1 queries
func NewInfluxQLHandler(b *InfluxQLBackend, config HandlerConfig) *InfluxqlHandler {
	handler := new(InfluxqlHandler)
	handler.InfluxQLBackend = b
	handler.HandlerConfig = config
	handler.Metrics = control.NewControllerMetrics([]string{})
	return handler
}
// ServeHTTP delegates every request to the influxqld query handler.
func (h *InfluxqlHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.handleInfluxqldQuery(w, req)
}
// DefaultChunkSize is the default number of points to write in
// one chunk.
// NOTE(review): no consumer of this constant is visible in this file —
// confirm where the chunked-response path applies this default.
const DefaultChunkSize = 10000

View File

@ -0,0 +1,176 @@
package legacy
import (
"encoding/json"
"io/ioutil"
"mime"
"net/http"
"strconv"
"strings"
"github.com/influxdata/flux/iocounter"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
	// traceIDHeader is the HTTP response header on which the request's
	// trace ID is returned to the client (set in handleInfluxqldQuery).
	traceIDHeader = "Trace-Id"
)
func (h *InfluxqlHandler) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
h.Metrics.Requests,
h.Metrics.RequestsLatency,
}
}
// handleInfluxqldQuery mimics the influxdb 1.0 /query endpoint.
//
// The query text is resolved, in order, from: the "q" form value, an
// uploaded multipart file named "q", or — for requests with content type
// application/vnd.influxql — the raw request body. Optional JSON-encoded
// query parameters, chunking options, and the target db/rp/epoch are read
// from the request, and the assembled request is forwarded to the
// influxqld proxy query service. If the query service fails after bytes
// have already been written to the client, the error is only logged.
func (h *InfluxqlHandler) handleInfluxqldQuery(w http.ResponseWriter, r *http.Request) {
	span, r := tracing.ExtractFromHTTPRequest(r, "handleInfluxqldQuery")
	defer span.Finish()

	if id, _, found := tracing.InfoFromSpan(span); found {
		w.Header().Set(traceIDHeader, id)
	}

	ctx := r.Context()
	defer r.Body.Close()

	auth, err := getAuthorization(ctx)
	if err != nil {
		h.HandleHTTPError(ctx, err, w)
		return
	}

	if !auth.IsActive() {
		h.HandleHTTPError(ctx, &influxdb.Error{
			Code: influxdb.EForbidden,
			Msg:  "insufficient permissions",
		}, w)
		return
	}

	o, err := h.OrganizationService.FindOrganization(ctx, influxdb.OrganizationFilter{
		ID: &auth.OrgID,
	})
	if err != nil {
		h.HandleHTTPError(ctx, err, w)
		return
	}

	var query string
	// Attempt to read the query from the "q" form value.
	if qp := strings.TrimSpace(r.FormValue("q")); qp != "" {
		query = qp
	} else if r.MultipartForm != nil && r.MultipartForm.File != nil {
		// If we have a multipart/form-data, try to retrieve a file from 'q'.
		if fhs := r.MultipartForm.File["q"]; len(fhs) > 0 {
			// Open the uploaded part and read its contents. The previous
			// implementation called ioutil.ReadFile(fhs[0].Filename),
			// which read a file from the server's local filesystem using
			// the client-supplied filename — both incorrect and unsafe.
			f, err := fhs[0].Open()
			if err != nil {
				h.HandleHTTPError(ctx, err, w)
				return
			}
			d, err := ioutil.ReadAll(f)
			f.Close()
			if err != nil {
				h.HandleHTTPError(ctx, err, w)
				return
			}
			query = string(d)
		}
	} else {
		// Fall back to the raw body for application/vnd.influxql requests.
		ct := r.Header.Get("Content-Type")
		mt, _, err := mime.ParseMediaType(ct)
		if err != nil {
			h.HandleHTTPError(ctx, &influxdb.Error{
				Code: influxdb.EInvalid,
				Err:  err,
			}, w)
			return
		}

		if mt == "application/vnd.influxql" {
			if d, err := ioutil.ReadAll(r.Body); err != nil {
				h.HandleHTTPError(ctx, err, w)
				return
			} else {
				query = string(d)
			}
		}
	}

	// parse the parameters
	rawParams := r.FormValue("params")
	var params map[string]interface{}
	if rawParams != "" {
		decoder := json.NewDecoder(strings.NewReader(rawParams))
		// UseNumber preserves numeric precision so ints and floats can be
		// distinguished below.
		decoder.UseNumber()
		if err := decoder.Decode(&params); err != nil {
			h.HandleHTTPError(ctx, &influxdb.Error{
				Code: influxdb.EInvalid,
				Msg:  "error parsing query parameters",
				Err:  err,
			}, w)
			return
		}

		// Convert json.Number into int64 and float64 values
		for k, v := range params {
			if v, ok := v.(json.Number); ok {
				var err error
				if strings.Contains(string(v), ".") {
					params[k], err = v.Float64()
				} else {
					params[k], err = v.Int64()
				}
				if err != nil {
					h.HandleHTTPError(ctx, &influxdb.Error{
						Code: influxdb.EInvalid,
						Msg:  "error parsing json value",
						Err:  err,
					}, w)
					return
				}
			}
		}
	}

	// Parse chunk size. Use default if not provided or cannot be parsed.
	chunked := r.FormValue("chunked") == "true"
	chunkSize := DefaultChunkSize
	if chunked {
		if n, err := strconv.ParseInt(r.FormValue("chunk_size"), 10, 64); err == nil && int(n) > 0 {
			chunkSize = int(n)
		}
	}

	req := &influxql.QueryRequest{
		DB:             r.FormValue("db"),
		RP:             r.FormValue("rp"),
		Epoch:          r.FormValue("epoch"),
		EncodingFormat: influxql.EncodingFormatFromMimeType(r.Header.Get("Accept")),
		OrganizationID: o.ID,
		Query:          query,
		Params:         params,
		Source:         r.Header.Get("User-Agent"),
		Authorization:  auth,
		Chunked:        chunked,
		ChunkSize:      chunkSize,
	}

	var respSize int64
	cw := iocounter.Writer{Writer: w}
	_, err = h.InfluxqldQueryService.Query(ctx, &cw, req)
	respSize = cw.Count()

	if err != nil {
		if respSize == 0 {
			// Only record the error headers IFF nothing has been written to w.
			h.HandleHTTPError(ctx, err, w)
			return
		}
		h.Logger.Info("error writing response to client",
			zap.String("org", o.Name),
			zap.String("handler", "influxql"),
			zap.Error(err),
		)
	}
}

View File

@ -0,0 +1,266 @@
//lint:file-ignore U1000 this error seems to be misreporting
package legacy
import (
"context"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb/v2"
pcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/influxql"
imock "github.com/influxdata/influxdb/v2/influxql/mock"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/mock"
)
// headerSubset reports whether every entry of want is present in got with
// an equal value, skipping request-ID headers injected by middleware.
func headerSubset(want, got http.Header) bool {
	for k, v := range want {
		if k == "X-Request-Id" || k == "Request-Id" {
			continue
		}
		gv, ok := got[k]
		if !ok || !cmp.Equal(v, gv) {
			return false
		}
	}
	return true
}

// cmpOpts compares http.Header values as equal when they match in both
// directions, ignoring request-ID headers.
var cmpOpts = []cmp.Option{
	cmp.Comparer(func(h1, h2 http.Header) bool {
		return headerSubset(h1, h2) && headerSubset(h2, h1)
	}),
}
func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
t.Skip("almost good to go, only unexpected content types")
ctx := context.Background()
type fields struct {
OrganizationService platform.OrganizationService
ProxyQueryService influxql.ProxyQueryService
}
type args struct {
w *httptest.ResponseRecorder
r *http.Request
}
tests := []struct {
name string
fields fields
args args
context context.Context
wantCode int
wantHeader http.Header
wantBody []byte
wantLogs []string
}{
{
name: "no token causes http error",
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusInternalServerError,
wantHeader: http.Header{
"X-Platform-Error-Code": {"internal error"},
"Content-Type": {"application/json; charset=utf-8"},
},
wantBody: []byte(`{"code":"internal error","message":"authorizer not found on context"}`),
},
{
name: "inactive authorizer",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Inactive}),
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusForbidden,
wantHeader: http.Header{
"Content-Type": {"application/json; charset=utf-8"},
"X-Platform-Error-Code": {"forbidden"},
},
wantBody: []byte(`{"code":"forbidden","message":"insufficient permissions"}`),
},
{
name: "unknown organization",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return nil, &platform.Error{
Code: platform.EForbidden,
Msg: "nope",
}
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusForbidden,
wantHeader: http.Header{
"Content-Type": {"application/json; charset=utf-8"},
"X-Platform-Error-Code": {"forbidden"},
},
wantBody: []byte(`{"code":"forbidden","message":"nope"}`),
},
{
name: "bad query",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
return influxql.Statistics{}, &platform.Error{
Code: platform.EUnprocessableEntity,
Msg: "bad query",
}
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusUnprocessableEntity,
wantHeader: http.Header{
"X-Platform-Error-Code": {"unprocessable entity"},
"Content-Type": {"application/json; charset=utf-8"},
},
wantBody: []byte(`{"code":"unprocessable entity","message":"bad query"}`),
},
{
name: "query fails during write",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
_, _ = io.WriteString(w, "fail")
return influxql.Statistics{}, &platform.Error{
Code: platform.EInternal,
Msg: "during query",
}
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantBody: []byte("fail"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"application/json"},
},
},
{
name: "good query unknown accept header",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
_, err := io.WriteString(w, "good")
return influxql.Statistics{}, err
},
},
},
args: args{
r: WithHeader(httptest.NewRequest("POST", "/query", nil).WithContext(ctx), "Accept", "text/csv"),
w: httptest.NewRecorder(),
},
wantBody: []byte("good"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"text/csv"},
},
wantLogs: []string{"text/csv"},
},
{
name: "good query",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
_, err := io.WriteString(w, "good")
return influxql.Statistics{}, err
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantBody: []byte("good"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"application/json"},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := &InfluxQLBackend{
HTTPErrorHandler: kithttp.ErrorHandler(0),
OrganizationService: tt.fields.OrganizationService,
InfluxqldQueryService: tt.fields.ProxyQueryService,
}
h := NewInfluxQLHandler(b, HandlerConfig{})
if tt.context != nil {
tt.args.r = tt.args.r.WithContext(tt.context)
}
tt.args.r.Header.Add("Content-Type", "application/vnd.influxql")
h.handleInfluxqldQuery(tt.args.w, tt.args.r)
if got, want := tt.args.w.Code, tt.wantCode; got != want {
t.Errorf("HandleQuery() status code = got %d / want %d", got, want)
}
if got, want := tt.args.w.Result().Header, tt.wantHeader; !cmp.Equal(got, want, cmpOpts...) {
t.Errorf("HandleQuery() headers = got(-)/want(+) %s", cmp.Diff(got, want))
}
if got, want := tt.args.w.Body.Bytes(), tt.wantBody; !cmp.Equal(got, want) {
t.Errorf("HandleQuery() body = got(-)/want(+) %s", cmp.Diff(string(got), string(want)))
}
})
}
}
func WithHeader(r *http.Request, key, value string) *http.Request {
r.Header.Set(key, value)
return r
}

View File

@ -0,0 +1,30 @@
package legacy
import (
"net/http"
"github.com/influxdata/httprouter"
)
// PingHandler serves the legacy /ping health-check endpoint.
type PingHandler struct {
	*httprouter.Router
	// InfluxDBVersion is reported in the X-Influxdb-Version response header.
	InfluxDBVersion string
}

// NewPingHandler constructs a PingHandler that answers GET and HEAD
// requests on /ping.
func NewPingHandler(version string) *PingHandler {
	h := &PingHandler{
		Router:          httprouter.New(),
		InfluxDBVersion: version,
	}
	h.HandlerFunc("GET", "/ping", h.pingHandler)
	h.HandlerFunc("HEAD", "/ping", h.pingHandler)
	return h
}
// pingHandler is the HTTP handler for the GET/HEAD /ping routes. It
// advertises build information via headers and replies 204 No Content.
// (The previous comment incorrectly referred to handlePostLegacyWrite.)
func (h *PingHandler) pingHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("X-Influxdb-Build", "cloud2")
	w.Header().Add("X-Influxdb-Version", h.InfluxDBVersion)
	w.WriteHeader(http.StatusNoContent)
}

85
http/legacy/router.go Normal file
View File

@ -0,0 +1,85 @@
package legacy
import (
"fmt"
"net/http"
"os"
"runtime/debug"
"sync"
"github.com/influxdata/httprouter"
platform "github.com/influxdata/influxdb/v2"
influxlogger "github.com/influxdata/influxdb/v2/logger"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// NewRouter returns a new router with a 404 handler, a 405 handler, and a
// panic handler, all rendering JSON error responses through h.
func NewRouter(h platform.HTTPErrorHandler) *httprouter.Router {
	b := baseHandler{HTTPErrorHandler: h}

	router := httprouter.New()
	router.NotFound = http.HandlerFunc(b.notFound)
	router.MethodNotAllowed = http.HandlerFunc(b.methodNotAllowed)
	router.PanicHandler = b.panic
	// Expose the matched route pattern on the request context for
	// downstream consumers.
	router.AddMatchedRouteToContext = true
	return router
}

// baseHandler provides the shared error-rendering behavior used by the
// router's fallback handlers.
type baseHandler struct {
	platform.HTTPErrorHandler
}
// notFound is the fallback handler for unmatched paths; it renders a
// JSON-encoded 404 response.
func (h baseHandler) notFound(w http.ResponseWriter, r *http.Request) {
	notFoundErr := &platform.Error{
		Code: platform.ENotFound,
		Msg:  "path not found",
	}
	h.HandleHTTPError(r.Context(), notFoundErr, w)
}
// methodNotAllowed is the fallback handler for known paths hit with an
// unsupported method; it renders a JSON-encoded 405 response echoing the
// methods the router reported via the Allow header.
func (h baseHandler) methodNotAllowed(w http.ResponseWriter, r *http.Request) {
	allowed := w.Header().Get("Allow")
	h.HandleHTTPError(r.Context(), &platform.Error{
		Code: platform.EMethodNotAllowed,
		Msg:  fmt.Sprintf("allow: %s", allowed),
	}, w)
}
// panic handles panics recovered from http handlers.
// It logs the recovered value with a stack trace, then returns a json
// response with http status code 500 and the recovered error message.
func (h baseHandler) panic(w http.ResponseWriter, r *http.Request, rcv interface{}) {
	ctx := r.Context()
	pe := &platform.Error{
		Code: platform.EInternal,
		Msg:  "a panic has occurred",
		Err:  fmt.Errorf("%s: %v", r.URL.String(), rcv),
	}

	l := getPanicLogger()
	// Attach the stack trace captured at recovery time to the log entry.
	if entry := l.Check(zapcore.ErrorLevel, pe.Msg); entry != nil {
		entry.Stack = string(debug.Stack())
		entry.Write(zap.Error(pe.Err))
	}

	h.HandleHTTPError(ctx, pe, w)
}
var (
	panicLogger     *zap.Logger
	panicLoggerOnce sync.Once
)

// getPanicLogger lazily constructs the logger shared by the panic
// handler. Initialization runs at most once, even under concurrency.
func getPanicLogger() *zap.Logger {
	panicLoggerOnce.Do(func() {
		panicLogger = influxlogger.New(os.Stderr).With(zap.String("handler", "panic"))
	})
	return panicLogger
}

View File

@ -0,0 +1,310 @@
package legacy
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/http/metric"
"github.com/influxdata/influxdb/v2/http/points"
"github.com/influxdata/influxdb/v2/kit/tracing"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/storage"
"go.uber.org/zap"
)
// Compile-time assertion that WriteHandler implements http.Handler.
var _ http.Handler = (*WriteHandler)(nil)

const (
	opPointsWriter = "http/v1PointsWriter"
	opWriteHandler = "http/v1WriteHandler"

	// autoCreatedBucketDescription marks buckets created on the fly from
	// a v1 db/rp pair.
	autoCreatedBucketDescription = "Auto-created from v1 db/rp mapping."
	// autoCreatedBucketRetentionPeriod is the retention (3 days) applied
	// to auto-created buckets.
	autoCreatedBucketRetentionPeriod = 3 * 24 * time.Hour
)
// PointsWriterBackend contains all the services needed to run a PointsWriterHandler.
type PointsWriterBackend struct {
	influxdb.HTTPErrorHandler
	Logger             *zap.Logger
	EventRecorder      metric.EventRecorder
	BucketService      influxdb.BucketService
	PointsWriter       storage.PointsWriter
	DBRPMappingService influxdb.DBRPMappingServiceV2
}

// NewPointsWriterBackend creates a new backend for legacy work, copying
// the shared services and tagging the logger for this handler.
func NewPointsWriterBackend(b *Backend) *PointsWriterBackend {
	return &PointsWriterBackend{
		HTTPErrorHandler:   b.HTTPErrorHandler,
		Logger:             b.Logger.With(zap.String("handler", "points_writer")),
		EventRecorder:      b.WriteEventRecorder,
		BucketService:      b.BucketService,
		PointsWriter:       b.PointsWriter,
		DBRPMappingService: b.DBRPMappingServiceV2,
	}
}
// WriteHandler represents an HTTP API handler for writing points via the
// legacy v1 /write endpoint. (The comment previously misnamed the type
// "PointsWriterHandler".)
type WriteHandler struct {
	influxdb.HTTPErrorHandler
	EventRecorder      metric.EventRecorder
	BucketService      influxdb.BucketService
	PointsWriter       storage.PointsWriter
	DBRPMappingService influxdb.DBRPMappingServiceV2

	router *httprouter.Router
	logger *zap.Logger
	// maxBatchSizeBytes bounds the decompressed size of a points batch;
	// it is passed through to points.BatchReadCloser. Presumably zero
	// means unlimited — confirm against that function's contract.
	maxBatchSizeBytes int64
	//parserOptions []models.ParserOption
}
// NewWriterHandler returns a new instance of WriteHandler, applies the
// provided functional options, and registers the POST /write route.
func NewWriterHandler(b *PointsWriterBackend, opts ...WriteHandlerOption) *WriteHandler {
	h := &WriteHandler{
		HTTPErrorHandler:   b.HTTPErrorHandler,
		EventRecorder:      b.EventRecorder,
		BucketService:      b.BucketService,
		PointsWriter:       b.PointsWriter,
		DBRPMappingService: b.DBRPMappingService,

		router: NewRouter(b.HTTPErrorHandler),
		logger: b.Logger.With(zap.String("handler", "points_writer")),
	}

	for _, opt := range opts {
		opt(h)
	}

	h.router.HandlerFunc(http.MethodPost, "/write", h.handleWrite)
	return h
}
// WriteHandlerOption is a functional option for configuring a *WriteHandler.
type WriteHandlerOption func(*WriteHandler)

// WithMaxBatchSizeBytes configures the maximum size for a
// (decompressed) points batch allowed by the write handler.
func WithMaxBatchSizeBytes(n int64) WriteHandlerOption {
	return func(w *WriteHandler) {
		w.maxBatchSizeBytes = n
	}
}
//// WithParserOptions configures options for points parsing
//func WithParserOptions(opts ...models.ParserOption) WriteHandlerOption {
// return func(w *WriteHandler) {
// w.parserOptions = opts
// }
//}
// ServeHTTP implements http.Handler by dispatching to the handler's
// internal router.
func (h *WriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.router.ServeHTTP(w, r)
}
// handleWrite handles requests for the v1 write endpoint.
//
// It authenticates the request, resolves (or auto-creates) the bucket
// mapped to the db/rp pair, parses the line-protocol body, and writes the
// points. Request size and response status are recorded for usage
// metering via the deferred recorder.
func (h *WriteHandler) handleWrite(w http.ResponseWriter, r *http.Request) {
	span, r := tracing.ExtractFromHTTPRequest(r, "WriteHandler")
	defer span.Finish()

	ctx := r.Context()
	auth, err := getAuthorization(ctx)
	if err != nil {
		h.HandleHTTPError(ctx, err, w)
		return
	}

	sw := kithttp.NewStatusResponseWriter(w)
	recorder := newWriteUsageRecorder(sw, h.EventRecorder)
	var requestBytes int
	defer func() {
		// Close around the requestBytes variable to placate the linter.
		recorder.Record(ctx, requestBytes, auth.OrgID, r.URL.Path)
	}()

	req, err := decodeWriteRequest(ctx, r, h.maxBatchSizeBytes)
	if err != nil {
		h.HandleHTTPError(ctx, err, sw)
		return
	}

	bucket, err := h.findOrCreateMappedBucket(ctx, auth.OrgID, req.Database, req.RetentionPolicy)
	if err != nil {
		h.HandleHTTPError(ctx, err, sw)
		return
	}
	span.LogKV("bucket_id", bucket.ID)

	parsed, err := points.NewParser(req.Precision).Parse(ctx, auth.OrgID, bucket.ID, req.Body)
	if err != nil {
		h.HandleHTTPError(ctx, err, sw)
		return
	}

	if err := h.PointsWriter.WritePoints(ctx, auth.OrgID, bucket.ID, parsed.Points); err != nil {
		h.HandleHTTPError(ctx, &influxdb.Error{
			Code: influxdb.EInternal,
			Op:   opWriteHandler,
			Msg:  "unexpected error writing points to database",
			Err:  err,
		}, sw)
		return
	}

	// Write the success status through the status-recording writer so the
	// usage recorder observes the 204; the original wrote to the raw
	// writer, bypassing sw's status bookkeeping (every error path above
	// already goes through sw).
	sw.WriteHeader(http.StatusNoContent)
}
// findOrCreateMappedBucket resolves the bucket behind a db/rp pair. An
// existing DBRPMappingV2 is followed to its bucket; when no mapping
// exists, one is created (along with a bucket, if necessary) via
// mapToBucket. Lookup errors other than "not found" are returned as-is.
func (h *WriteHandler) findOrCreateMappedBucket(ctx context.Context, orgID influxdb.ID, db, rp string) (*influxdb.Bucket, error) {
	mapping, err := h.findMapping(ctx, orgID, db, rp)
	switch {
	case err == nil:
		return h.BucketService.FindBucketByID(ctx, mapping.BucketID)
	case isErrNotFound(err):
		return h.mapToBucket(ctx, orgID, db, rp)
	default:
		return nil, err
	}
}
// findMapping looks up the DBRPMappingV2 for the database and retention
// policy combination. An empty rp matches any retention policy; when
// multiple mappings match, the first is returned. A missing mapping
// yields an ENotFound error.
func (h *WriteHandler) findMapping(ctx context.Context, orgID influxdb.ID, db, rp string) (*influxdb.DBRPMappingV2, error) {
	filter := influxdb.DBRPMappingFilterV2{
		OrgID:    &orgID,
		Database: &db,
	}
	if rp != "" {
		filter.RetentionPolicy = &rp
	}

	mappings, n, err := h.DBRPMappingService.FindMany(ctx, filter)
	switch {
	case err != nil:
		return nil, err
	case n == 0:
		return nil, &influxdb.Error{
			Code: influxdb.ENotFound,
			Msg:  "no dbrp mapping found",
		}
	default:
		return mappings[0], nil
	}
}
// createMapping creates a DBRPMappingV2 for the database and retention policy
// combination, pointing at the given bucket.
func (h *WriteHandler) createMapping(ctx context.Context, orgID, bucketID influxdb.ID, db, rp string) error {
	return h.DBRPMappingService.Create(ctx, &influxdb.DBRPMappingV2{
		OrganizationID:  orgID,
		BucketID:        bucketID,
		Database:        db,
		RetentionPolicy: rp,
	})
}
// mapToBucket creates a new DBRPMappingV2 to either an existing Bucket (if it
// can find it) or a new one it creates for this purpose.
func (h *WriteHandler) mapToBucket(ctx context.Context, orgID influxdb.ID, db, rp string) (*influxdb.Bucket, error) {
	// An empty retention policy maps to the 1.x default, "autogen".
	if rp == "" {
		rp = "autogen"
	}
	// v1-mapped buckets are named "<db>/<rp>".
	name := fmt.Sprintf("%s/%s", db, rp)
	bucket, err := h.BucketService.FindBucket(ctx, influxdb.BucketFilter{
		OrganizationID: &orgID,
		Name:           &name,
	})
	if err == nil {
		// The bucket already exists; just record the mapping against it.
		if err := h.createMapping(ctx, orgID, bucket.ID, db, rp); err != nil {
			return nil, err
		}

		return bucket, nil
	}
	if !isErrNotFound(err) {
		return nil, err
	}

	// No such bucket: create one with the auto-creation defaults, then
	// record the mapping against its newly assigned ID.
	now := time.Now().UTC()
	bucket = &influxdb.Bucket{
		Type:                influxdb.BucketTypeUser,
		Name:                name,
		Description:         autoCreatedBucketDescription,
		OrgID:               orgID,
		RetentionPolicyName: rp,
		RetentionPeriod:     autoCreatedBucketRetentionPeriod,
		CRUDLog: influxdb.CRUDLog{
			CreatedAt: now,
			UpdatedAt: now,
		},
	}
	err = h.BucketService.CreateBucket(ctx, bucket)
	if err != nil {
		return nil, err
	}

	if err := h.createMapping(ctx, orgID, bucket.ID, db, rp); err != nil {
		return nil, err
	}

	return bucket, nil
}
// writeRequest is a transport-agnostic write request. It holds all inputs for
// processing a v1 write request.
type writeRequest struct {
	OrganizationName string
	Database         string
	RetentionPolicy  string
	// Precision is the timestamp precision query parameter ("ns" by default).
	Precision string
	// Body yields the (possibly decompressed, size-limited) line protocol.
	Body io.ReadCloser
}
// decodeWriteRequest extracts write request information from an inbound
// http.Request and returns a writeRequest. The body is wrapped according
// to the Content-Encoding header and the batch size limit; a missing
// "db" query parameter is an EInvalid error. Precision defaults to "ns".
func decodeWriteRequest(ctx context.Context, r *http.Request, maxBatchSizeBytes int64) (*writeRequest, error) {
	params := r.URL.Query()

	db := params.Get("db")
	if db == "" {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg:  "missing db",
		}
	}

	precision := params.Get("precision")
	if precision == "" {
		precision = "ns"
	}

	body, err := points.BatchReadCloser(r.Body, r.Header.Get("Content-Encoding"), maxBatchSizeBytes)
	if err != nil {
		return nil, err
	}

	return &writeRequest{
		OrganizationName: params.Get("org"),
		Database:         db,
		RetentionPolicy:  params.Get("rp"),
		Precision:        precision,
		Body:             body,
	}, nil
}
// isErrNotFound reports whether err is (or wraps) an *influxdb.Error
// carrying the ENotFound code.
func isErrNotFound(err error) bool {
	var ierr *influxdb.Error
	if !errors.As(err, &ierr) {
		return false
	}
	return ierr.Code == influxdb.ENotFound
}

View File

@ -0,0 +1,490 @@
package legacy
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/authorizer"
pcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/dbrp"
"github.com/influxdata/influxdb/v2/http/mocks"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/snowflake"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
var generator = snowflake.NewDefaultIDGenerator()
func TestWriteHandler_ExistingBucket(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var (
// Mocked Services
eventRecorder = mocks.NewMockEventRecorder(ctrl)
dbrpMappingSvc = mocks.NewMockDBRPMappingServiceV2(ctrl)
bucketService = mocks.NewMockBucketService(ctrl)
pointsWriter = mocks.NewMockPointsWriter(ctrl)
// Found Resources
orgID = generator.ID()
bucket = &influxdb.Bucket{
ID: generator.ID(),
OrgID: orgID,
Name: "mydb/autogen",
RetentionPolicyName: "autogen",
RetentionPeriod: 72 * time.Hour,
}
mapping = &influxdb.DBRPMappingV2{
OrganizationID: orgID,
BucketID: bucket.ID,
Database: "mydb",
RetentionPolicy: "autogen",
}
lineProtocolBody = "m,t1=v1 f1=2 100"
)
findAutogenMapping := dbrpMappingSvc.
EXPECT().
FindMany(gomock.Any(), influxdb.DBRPMappingFilterV2{
OrgID: &mapping.OrganizationID,
Database: &mapping.Database,
}).Return([]*influxdb.DBRPMappingV2{mapping}, 1, nil)
findBucketByID := bucketService.
EXPECT().
FindBucketByID(gomock.Any(), bucket.ID).Return(bucket, nil)
points := parseLineProtocol(t, lineProtocolBody)
writePoints := pointsWriter.
EXPECT().
WritePoints(gomock.Any(), orgID, bucket.ID, pointsMatcher{points}).Return(nil)
recordWriteEvent := eventRecorder.EXPECT().
Record(gomock.Any(), gomock.Any())
gomock.InOrder(
findAutogenMapping,
findBucketByID,
writePoints,
recordWriteEvent,
)
perms := newPermissions(influxdb.BucketsResourceType, &orgID, nil)
auth := newAuthorization(orgID, perms...)
ctx := pcontext.SetAuthorizer(context.Background(), auth)
r := newWriteRequest(ctx, lineProtocolBody)
params := r.URL.Query()
params.Set("db", "mydb")
params.Set("rp", "")
r.URL.RawQuery = params.Encode()
handler := NewWriterHandler(&PointsWriterBackend{
HTTPErrorHandler: DefaultErrorHandler,
Logger: zaptest.NewLogger(t),
BucketService: authorizer.NewBucketService(bucketService),
DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc),
PointsWriter: pointsWriter,
EventRecorder: eventRecorder,
})
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
assert.Equal(t, http.StatusNoContent, w.Code)
assert.Equal(t, "", w.Body.String())
}
func TestWriteHandler_DefaultBucketAutoCreation(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var (
// Mocked Services
eventRecorder = mocks.NewMockEventRecorder(ctrl)
dbrpMappingSvc = mocks.NewMockDBRPMappingServiceV2(ctrl)
bucketService = mocks.NewMockBucketService(ctrl)
pointsWriter = mocks.NewMockPointsWriter(ctrl)
// DBRP Mapping Fields
db = "mydb"
orgID = generator.ID()
// Bucket Fields
bucketName = "mydb/autogen"
bucketID = generator.ID()
lineProtocolBody = "m,t1=v1 f1=2 100"
)
findAutogenMapping := dbrpMappingSvc.EXPECT().
FindMany(gomock.Any(), influxdb.DBRPMappingFilterV2{
OrgID: &orgID,
Database: &db,
}).Return([]*influxdb.DBRPMappingV2{}, 0, nil)
findBucketByName := bucketService.EXPECT().
FindBucket(gomock.Any(), influxdb.BucketFilter{
OrganizationID: &orgID,
Name: &bucketName,
}).Return(nil, &influxdb.Error{
Code: influxdb.ENotFound,
})
createAutogenMapping := dbrpMappingSvc.EXPECT().
Create(gomock.Any(), &influxdb.DBRPMappingV2{
OrganizationID: orgID,
Database: "mydb",
RetentionPolicy: "autogen",
BucketID: bucketID,
}).Return(nil)
createBucket := bucketService.EXPECT().
CreateBucket(gomock.Any(), bucketMatcher{&influxdb.Bucket{
Type: influxdb.BucketTypeUser,
Name: bucketName,
Description: autoCreatedBucketDescription,
OrgID: orgID,
RetentionPolicyName: "autogen",
RetentionPeriod: 72 * time.Hour,
}}).Return(nil).Do(func(_ context.Context, b *influxdb.Bucket) {
b.ID = bucketID
})
points := parseLineProtocol(t, lineProtocolBody)
writePoints := pointsWriter.EXPECT().
WritePoints(gomock.Any(), orgID, bucketID, pointsMatcher{points}).Return(nil)
recordWriteEvent := eventRecorder.EXPECT().
Record(gomock.Any(), gomock.Any())
gomock.InOrder(
findAutogenMapping,
findBucketByName,
createBucket,
createAutogenMapping,
writePoints,
recordWriteEvent,
)
perms := newPermissions(influxdb.BucketsResourceType, &orgID, nil)
auth := newAuthorization(orgID, perms...)
ctx := pcontext.SetAuthorizer(context.Background(), auth)
r := newWriteRequest(ctx, lineProtocolBody)
params := r.URL.Query()
params.Set("db", "mydb")
r.URL.RawQuery = params.Encode()
handler := NewWriterHandler(&PointsWriterBackend{
HTTPErrorHandler: DefaultErrorHandler,
Logger: zaptest.NewLogger(t),
BucketService: authorizer.NewBucketService(bucketService),
DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc),
PointsWriter: pointsWriter,
EventRecorder: eventRecorder,
})
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
assert.Equal(t, http.StatusNoContent, w.Code)
assert.Equal(t, "", w.Body.String())
}
func TestWriteHandler_NamedBucketAutoCreation(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var (
// Mocked Services
eventRecorder = mocks.NewMockEventRecorder(ctrl)
dbrpMappingSvc = mocks.NewMockDBRPMappingServiceV2(ctrl)
bucketService = mocks.NewMockBucketService(ctrl)
pointsWriter = mocks.NewMockPointsWriter(ctrl)
// DBRP Mapping Fields
db = "mydb"
rp = "myrp"
orgID = generator.ID()
// Bucket Fields
bucketName = "mydb/myrp"
bucketID = generator.ID()
lineProtocolBody = "m,t1=v1 f1=2 100"
)
findNamedMapping := dbrpMappingSvc.EXPECT().
FindMany(gomock.Any(), influxdb.DBRPMappingFilterV2{
OrgID: &orgID,
Database: &db,
RetentionPolicy: &rp,
}).Return([]*influxdb.DBRPMappingV2{}, 0, nil)
findBucketByName := bucketService.EXPECT().
FindBucket(gomock.Any(), influxdb.BucketFilter{
OrganizationID: &orgID,
Name: &bucketName,
}).Return(nil, &influxdb.Error{
Code: influxdb.ENotFound,
})
createNamedMapping := dbrpMappingSvc.EXPECT().
Create(gomock.Any(), &influxdb.DBRPMappingV2{
OrganizationID: orgID,
Database: "mydb",
RetentionPolicy: "myrp",
BucketID: bucketID,
Default: false,
}).Return(nil)
createBucket := bucketService.EXPECT().
CreateBucket(gomock.Any(), bucketMatcher{&influxdb.Bucket{
Type: influxdb.BucketTypeUser,
Name: bucketName,
Description: autoCreatedBucketDescription,
OrgID: orgID,
RetentionPolicyName: "myrp",
RetentionPeriod: 72 * time.Hour,
}}).Return(nil).Do(func(_ context.Context, b *influxdb.Bucket) {
b.ID = bucketID
})
points := parseLineProtocol(t, lineProtocolBody)
writePoints := pointsWriter.EXPECT().
WritePoints(gomock.Any(), orgID, bucketID, pointsMatcher{points}).Return(nil)
recordWriteEvent := eventRecorder.EXPECT().
Record(gomock.Any(), gomock.Any())
gomock.InOrder(
findNamedMapping,
findBucketByName,
createBucket,
createNamedMapping,
writePoints,
recordWriteEvent,
)
perms := newPermissions(influxdb.BucketsResourceType, &orgID, nil)
auth := newAuthorization(orgID, perms...)
ctx := pcontext.SetAuthorizer(context.Background(), auth)
r := newWriteRequest(ctx, lineProtocolBody)
params := r.URL.Query()
params.Set("db", "mydb")
params.Set("rp", "myrp")
r.URL.RawQuery = params.Encode()
handler := NewWriterHandler(&PointsWriterBackend{
HTTPErrorHandler: DefaultErrorHandler,
Logger: zaptest.NewLogger(t),
BucketService: authorizer.NewBucketService(bucketService),
DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc),
PointsWriter: pointsWriter,
EventRecorder: eventRecorder,
})
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
assert.Equal(t, http.StatusNoContent, w.Code)
assert.Equal(t, "", w.Body.String())
}
func TestWriteHandler_MissingCreatePermissions(t *testing.T) {
orgID := generator.ID()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var (
// Mocked Services
eventRecorder = mocks.NewMockEventRecorder(ctrl)
dbrpMappingSvc = mocks.NewMockDBRPMappingServiceV2(ctrl)
bucketService = mocks.NewMockBucketService(ctrl)
pointsWriter = mocks.NewMockPointsWriter(ctrl)
// DBRP Mapping Fields
db = "mydb"
rp = "myrp"
// Bucket Fields
bucketName = "mydb/myrp"
lineProtocolBody = "m,t1=v1 f1=2 100"
)
findNamedMapping := dbrpMappingSvc.EXPECT().
FindMany(gomock.Any(), influxdb.DBRPMappingFilterV2{
OrgID: &orgID,
Database: &db,
RetentionPolicy: &rp,
}).Return([]*influxdb.DBRPMappingV2{}, 0, nil)
findBucketByName := bucketService.EXPECT().
FindBucket(gomock.Any(), influxdb.BucketFilter{
OrganizationID: &orgID,
Name: &bucketName,
}).Return(nil, &influxdb.Error{
Code: influxdb.ENotFound,
})
recordWriteEvent := eventRecorder.EXPECT().
Record(gomock.Any(), gomock.Any())
gomock.InOrder(
findNamedMapping,
findBucketByName,
recordWriteEvent,
)
auth := newAuthorization(orgID)
ctx := pcontext.SetAuthorizer(context.Background(), auth)
r := newWriteRequest(ctx, lineProtocolBody)
params := r.URL.Query()
params.Set("db", "mydb")
params.Set("rp", "myrp")
r.URL.RawQuery = params.Encode()
handler := NewWriterHandler(&PointsWriterBackend{
HTTPErrorHandler: DefaultErrorHandler,
Logger: zaptest.NewLogger(t),
BucketService: authorizer.NewBucketService(bucketService),
DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc),
PointsWriter: pointsWriter,
EventRecorder: eventRecorder,
})
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
assert.Equal(t, http.StatusUnauthorized, w.Code)
assertJSONErrorBody(t, w.Body, "unauthorized", fmt.Sprintf("write:orgs/%s/buckets is unauthorized", orgID))
}
var DefaultErrorHandler = kithttp.ErrorHandler(0)
// parseLineProtocol parses line protocol into points, failing the test
// immediately if the input is invalid. t.Helper attributes failures to
// the calling test.
func parseLineProtocol(t *testing.T, line string) []models.Point {
	t.Helper()
	points, err := models.ParsePoints([]byte(line))
	if err != nil {
		// Fail fast: the previous t.Error continued execution and
		// returned a nil slice, producing confusing downstream
		// assertion failures in the caller.
		t.Fatalf("parsing line protocol %q: %v", line, err)
	}
	return points
}
// pointsMatcher is a gomock matcher that compares a []models.Point
// argument against an expected set of points by name, tags and fields.
type pointsMatcher struct {
	// points holds the expected points, in order.
	points []models.Point
}
// Matches reports whether x is a []models.Point equal to m.points,
// comparing each point's name, tags and fields. Timestamps are not
// compared. Points whose fields cannot be decoded never match.
func (m pointsMatcher) Matches(x interface{}) bool {
	other, ok := x.([]models.Point)
	if !ok {
		return false
	}

	if len(m.points) != len(other) {
		return false
	}

	// NOTE: the previous loop bound of len(m.points)-1 skipped the
	// final point, so a mismatch in the last element went undetected.
	for i := 0; i < len(m.points); i++ {
		p := m.points[i]
		op := other[i]

		if !reflect.DeepEqual(p.Name(), op.Name()) {
			return false
		}
		if !reflect.DeepEqual(p.Tags(), op.Tags()) {
			return false
		}

		fields, err := p.Fields()
		if err != nil {
			return false
		}
		ofields, err := op.Fields()
		if err != nil {
			return false
		}
		if !reflect.DeepEqual(fields, ofields) {
			return false
		}
	}

	return true
}
// String renders the expected points; gomock includes it in mismatch output.
func (pm pointsMatcher) String() string {
	return fmt.Sprintf("%#v", pm.points)
}
// bucketMatcher is a gomock matcher that compares a *influxdb.Bucket
// argument against the embedded expected bucket, ignoring CRUDLog.
type bucketMatcher struct {
	*influxdb.Bucket
}
// Matches reports whether x is a *influxdb.Bucket equal to the wrapped
// expected bucket, ignoring the CRUDLog bookkeeping fields.
func (bm bucketMatcher) Matches(x interface{}) bool {
	candidate, ok := x.(*influxdb.Bucket)
	if !ok {
		return false
	}
	ignoreCRUD := cmpopts.IgnoreFields(influxdb.Bucket{}, "CRUDLog")
	return cmp.Equal(bm.Bucket, candidate, ignoreCRUD)
}
// String renders the expected bucket; gomock includes it in mismatch output.
func (bm bucketMatcher) String() string {
	return fmt.Sprintf("%#v", bm.Bucket)
}
// newPermissions returns a write and a read permission, in that order,
// scoped to the given resource type, organization and resource ID.
func newPermissions(resourceType influxdb.ResourceType, orgID, id *influxdb.ID) []influxdb.Permission {
	resource := influxdb.Resource{
		Type:  resourceType,
		OrgID: orgID,
		ID:    id,
	}
	perms := make([]influxdb.Permission, 0, 2)
	for _, action := range []influxdb.Action{influxdb.WriteAction, influxdb.ReadAction} {
		perms = append(perms, influxdb.Permission{
			Action:   action,
			Resource: resource,
		})
	}
	return perms
}
// newAuthorization returns an active authorization for orgID with a
// freshly generated ID, carrying exactly the given permissions (none
// when called with no variadic arguments).
func newAuthorization(orgID influxdb.ID, permissions ...influxdb.Permission) *influxdb.Authorization {
	return &influxdb.Authorization{
		ID:          generator.ID(),
		Status:      influxdb.Active,
		OrgID:       orgID,
		Permissions: permissions,
	}
}
// assertJSONErrorBody decodes a JSON error payload from body and checks
// its "code" and "message" fields against the expected values. Decoding
// failures abort the test.
func assertJSONErrorBody(t *testing.T, body io.Reader, code, message string) {
	t.Helper()

	var got struct {
		Code    string `json:"code"`
		Message string `json:"message"`
	}
	require.NoError(t, json.NewDecoder(body).Decode(&got))
	assert.Equal(t, code, got.Code)
	assert.Equal(t, message, got.Message)
}
// newWriteRequest builds a POST request to the legacy /write endpoint
// carrying body as its payload and ctx as its context. An empty body
// yields a request with no body reader.
func newWriteRequest(ctx context.Context, body string) *http.Request {
	var payload io.Reader
	if len(body) > 0 {
		payload = strings.NewReader(body)
	}
	req := httptest.NewRequest(http.MethodPost, "http://localhost:9999/write", payload)
	return req.WithContext(ctx)
}

View File

@ -0,0 +1,31 @@
package legacy
import (
"context"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/http/metric"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
)
// newWriteUsageRecorder returns a writeUsageRecorder that reads response
// size and status from w and forwards usage events to recorder.
func newWriteUsageRecorder(w *kithttp.StatusResponseWriter, recorder metric.EventRecorder) *writeUsageRecorder {
	return &writeUsageRecorder{
		Writer:        w,
		EventRecorder: recorder,
	}
}
// writeUsageRecorder couples a status-capturing response writer with an
// event recorder so write-endpoint usage can be reported per request.
type writeUsageRecorder struct {
	// Writer captures the response status code and byte count.
	Writer *kithttp.StatusResponseWriter
	// EventRecorder receives the assembled usage event.
	EventRecorder metric.EventRecorder
}
// Record emits a usage event for a completed write request, combining the
// caller-supplied request size with the response size and status code
// captured by the wrapped StatusResponseWriter.
func (w *writeUsageRecorder) Record(ctx context.Context, requestBytes int, orgID influxdb.ID, endpoint string) {
	w.EventRecorder.Record(ctx, metric.Event{
		OrgID:         orgID,
		Endpoint:      endpoint,
		RequestBytes:  requestBytes,
		ResponseBytes: w.Writer.ResponseBytes(),
		Status:        w.Writer.Code(),
	})
}

View File

@ -0,0 +1,145 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: BucketService)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
)
// MockBucketService is a mock of BucketService interface
type MockBucketService struct {
ctrl *gomock.Controller
recorder *MockBucketServiceMockRecorder
}
// MockBucketServiceMockRecorder is the mock recorder for MockBucketService
type MockBucketServiceMockRecorder struct {
mock *MockBucketService
}
// NewMockBucketService creates a new mock instance
func NewMockBucketService(ctrl *gomock.Controller) *MockBucketService {
mock := &MockBucketService{ctrl: ctrl}
mock.recorder = &MockBucketServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockBucketService) EXPECT() *MockBucketServiceMockRecorder {
return m.recorder
}
// CreateBucket mocks base method
func (m *MockBucketService) CreateBucket(arg0 context.Context, arg1 *influxdb.Bucket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateBucket", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateBucket indicates an expected call of CreateBucket
func (mr *MockBucketServiceMockRecorder) CreateBucket(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockBucketService)(nil).CreateBucket), arg0, arg1)
}
// DeleteBucket mocks base method
func (m *MockBucketService) DeleteBucket(arg0 context.Context, arg1 influxdb.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteBucket", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteBucket indicates an expected call of DeleteBucket
func (mr *MockBucketServiceMockRecorder) DeleteBucket(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockBucketService)(nil).DeleteBucket), arg0, arg1)
}
// FindBucket mocks base method
func (m *MockBucketService) FindBucket(arg0 context.Context, arg1 influxdb.BucketFilter) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBucket", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBucket indicates an expected call of FindBucket
func (mr *MockBucketServiceMockRecorder) FindBucket(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucket", reflect.TypeOf((*MockBucketService)(nil).FindBucket), arg0, arg1)
}
// FindBucketByID mocks base method
func (m *MockBucketService) FindBucketByID(arg0 context.Context, arg1 influxdb.ID) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBucketByID", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBucketByID indicates an expected call of FindBucketByID
func (mr *MockBucketServiceMockRecorder) FindBucketByID(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByID", reflect.TypeOf((*MockBucketService)(nil).FindBucketByID), arg0, arg1)
}
// FindBucketByName mocks base method
func (m *MockBucketService) FindBucketByName(arg0 context.Context, arg1 influxdb.ID, arg2 string) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBucketByName", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBucketByName indicates an expected call of FindBucketByName
func (mr *MockBucketServiceMockRecorder) FindBucketByName(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByName", reflect.TypeOf((*MockBucketService)(nil).FindBucketByName), arg0, arg1, arg2)
}
// FindBuckets mocks base method
func (m *MockBucketService) FindBuckets(arg0 context.Context, arg1 influxdb.BucketFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindBuckets", varargs...)
ret0, _ := ret[0].([]*influxdb.Bucket)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindBuckets indicates an expected call of FindBuckets
func (mr *MockBucketServiceMockRecorder) FindBuckets(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBuckets", reflect.TypeOf((*MockBucketService)(nil).FindBuckets), varargs...)
}
// UpdateBucket mocks base method
func (m *MockBucketService) UpdateBucket(arg0 context.Context, arg1 influxdb.ID, arg2 influxdb.BucketUpdate) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateBucket", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateBucket indicates an expected call of UpdateBucket
func (mr *MockBucketServiceMockRecorder) UpdateBucket(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBucket", reflect.TypeOf((*MockBucketService)(nil).UpdateBucket), arg0, arg1, arg2)
}

View File

@ -0,0 +1,115 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingService)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
)
// MockDBRPMappingService is a mock of DBRPMappingService interface
type MockDBRPMappingService struct {
ctrl *gomock.Controller
recorder *MockDBRPMappingServiceMockRecorder
}
// MockDBRPMappingServiceMockRecorder is the mock recorder for MockDBRPMappingService
type MockDBRPMappingServiceMockRecorder struct {
mock *MockDBRPMappingService
}
// NewMockDBRPMappingService creates a new mock instance
func NewMockDBRPMappingService(ctrl *gomock.Controller) *MockDBRPMappingService {
mock := &MockDBRPMappingService{ctrl: ctrl}
mock.recorder = &MockDBRPMappingServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDBRPMappingService) EXPECT() *MockDBRPMappingServiceMockRecorder {
return m.recorder
}
// Create mocks base method
func (m *MockDBRPMappingService) Create(arg0 context.Context, arg1 *influxdb.DBRPMapping) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Create indicates an expected call of Create
func (mr *MockDBRPMappingServiceMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingService)(nil).Create), arg0, arg1)
}
// Delete mocks base method
func (m *MockDBRPMappingService) Delete(arg0 context.Context, arg1, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockDBRPMappingServiceMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingService)(nil).Delete), arg0, arg1, arg2, arg3)
}
// Find mocks base method
func (m *MockDBRPMappingService) Find(arg0 context.Context, arg1 influxdb.DBRPMappingFilter) (*influxdb.DBRPMapping, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Find", arg0, arg1)
ret0, _ := ret[0].(*influxdb.DBRPMapping)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Find indicates an expected call of Find
func (mr *MockDBRPMappingServiceMockRecorder) Find(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Find", reflect.TypeOf((*MockDBRPMappingService)(nil).Find), arg0, arg1)
}
// FindBy mocks base method
func (m *MockDBRPMappingService) FindBy(arg0 context.Context, arg1, arg2, arg3 string) (*influxdb.DBRPMapping, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBy", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*influxdb.DBRPMapping)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBy indicates an expected call of FindBy
func (mr *MockDBRPMappingServiceMockRecorder) FindBy(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBy", reflect.TypeOf((*MockDBRPMappingService)(nil).FindBy), arg0, arg1, arg2, arg3)
}
// FindMany mocks base method
func (m *MockDBRPMappingService) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindMany", varargs...)
ret0, _ := ret[0].([]*influxdb.DBRPMapping)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindMany indicates an expected call of FindMany
func (mr *MockDBRPMappingServiceMockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingService)(nil).FindMany), varargs...)
}

View File

@ -0,0 +1,114 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingServiceV2)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
)
// MockDBRPMappingServiceV2 is a mock of DBRPMappingServiceV2 interface
type MockDBRPMappingServiceV2 struct {
ctrl *gomock.Controller
recorder *MockDBRPMappingServiceV2MockRecorder
}
// MockDBRPMappingServiceV2MockRecorder is the mock recorder for MockDBRPMappingServiceV2
type MockDBRPMappingServiceV2MockRecorder struct {
mock *MockDBRPMappingServiceV2
}
// NewMockDBRPMappingServiceV2 creates a new mock instance
func NewMockDBRPMappingServiceV2(ctrl *gomock.Controller) *MockDBRPMappingServiceV2 {
mock := &MockDBRPMappingServiceV2{ctrl: ctrl}
mock.recorder = &MockDBRPMappingServiceV2MockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDBRPMappingServiceV2) EXPECT() *MockDBRPMappingServiceV2MockRecorder {
return m.recorder
}
// Create mocks base method
func (m *MockDBRPMappingServiceV2) Create(arg0 context.Context, arg1 *influxdb.DBRPMappingV2) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Create indicates an expected call of Create
func (mr *MockDBRPMappingServiceV2MockRecorder) Create(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Create), arg0, arg1)
}
// Delete mocks base method
func (m *MockDBRPMappingServiceV2) Delete(arg0 context.Context, arg1, arg2 influxdb.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockDBRPMappingServiceV2MockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Delete), arg0, arg1, arg2)
}
// FindByID mocks base method
func (m *MockDBRPMappingServiceV2) FindByID(arg0 context.Context, arg1, arg2 influxdb.ID) (*influxdb.DBRPMappingV2, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindByID", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.DBRPMappingV2)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindByID indicates an expected call of FindByID
func (mr *MockDBRPMappingServiceV2MockRecorder) FindByID(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindByID", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).FindByID), arg0, arg1, arg2)
}
// FindMany mocks base method
func (m *MockDBRPMappingServiceV2) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilterV2, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMappingV2, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindMany", varargs...)
ret0, _ := ret[0].([]*influxdb.DBRPMappingV2)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindMany indicates an expected call of FindMany
func (mr *MockDBRPMappingServiceV2MockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).FindMany), varargs...)
}
// Update mocks base method
func (m *MockDBRPMappingServiceV2) Update(arg0 context.Context, arg1 *influxdb.DBRPMappingV2) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockDBRPMappingServiceV2MockRecorder) Update(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Update), arg0, arg1)
}

View File

@ -0,0 +1,48 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2/http/metric (interfaces: EventRecorder)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
metric "github.com/influxdata/influxdb/v2/http/metric"
)
// MockEventRecorder is a mock of EventRecorder interface
type MockEventRecorder struct {
ctrl *gomock.Controller
recorder *MockEventRecorderMockRecorder
}
// MockEventRecorderMockRecorder is the mock recorder for MockEventRecorder
type MockEventRecorderMockRecorder struct {
mock *MockEventRecorder
}
// NewMockEventRecorder creates a new mock instance
func NewMockEventRecorder(ctrl *gomock.Controller) *MockEventRecorder {
mock := &MockEventRecorder{ctrl: ctrl}
mock.recorder = &MockEventRecorderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockEventRecorder) EXPECT() *MockEventRecorderMockRecorder {
return m.recorder
}
// Record mocks base method
func (m *MockEventRecorder) Record(arg0 context.Context, arg1 metric.Event) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Record", arg0, arg1)
}
// Record indicates an expected call of Record
func (mr *MockEventRecorderMockRecorder) Record(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Record", reflect.TypeOf((*MockEventRecorder)(nil).Record), arg0, arg1)
}

View File

@ -0,0 +1,130 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: OrganizationService)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
)
// MockOrganizationService is a mock of OrganizationService interface
type MockOrganizationService struct {
ctrl *gomock.Controller
recorder *MockOrganizationServiceMockRecorder
}
// MockOrganizationServiceMockRecorder is the mock recorder for MockOrganizationService
type MockOrganizationServiceMockRecorder struct {
mock *MockOrganizationService
}
// NewMockOrganizationService creates a new mock instance
func NewMockOrganizationService(ctrl *gomock.Controller) *MockOrganizationService {
mock := &MockOrganizationService{ctrl: ctrl}
mock.recorder = &MockOrganizationServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockOrganizationService) EXPECT() *MockOrganizationServiceMockRecorder {
return m.recorder
}
// CreateOrganization mocks base method
func (m *MockOrganizationService) CreateOrganization(arg0 context.Context, arg1 *influxdb.Organization) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateOrganization", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateOrganization indicates an expected call of CreateOrganization
func (mr *MockOrganizationServiceMockRecorder) CreateOrganization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).CreateOrganization), arg0, arg1)
}
// DeleteOrganization mocks base method
func (m *MockOrganizationService) DeleteOrganization(arg0 context.Context, arg1 influxdb.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteOrganization", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteOrganization indicates an expected call of DeleteOrganization
func (mr *MockOrganizationServiceMockRecorder) DeleteOrganization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOrganization", reflect.TypeOf((*MockOrganizationService)(nil).DeleteOrganization), arg0, arg1)
}
// FindOrganization mocks base method
func (m *MockOrganizationService) FindOrganization(arg0 context.Context, arg1 influxdb.OrganizationFilter) (*influxdb.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOrganization", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOrganization indicates an expected call of FindOrganization
func (mr *MockOrganizationServiceMockRecorder) FindOrganization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganization", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganization), arg0, arg1)
}
// FindOrganizationByID mocks base method
func (m *MockOrganizationService) FindOrganizationByID(arg0 context.Context, arg1 influxdb.ID) (*influxdb.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOrganizationByID", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOrganizationByID indicates an expected call of FindOrganizationByID
func (mr *MockOrganizationServiceMockRecorder) FindOrganizationByID(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizationByID", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizationByID), arg0, arg1)
}
// FindOrganizations mocks base method
func (m *MockOrganizationService) FindOrganizations(arg0 context.Context, arg1 influxdb.OrganizationFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindOrganizations", varargs...)
ret0, _ := ret[0].([]*influxdb.Organization)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindOrganizations indicates an expected call of FindOrganizations
func (mr *MockOrganizationServiceMockRecorder) FindOrganizations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizations", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizations), varargs...)
}
// UpdateOrganization mocks base method
func (m *MockOrganizationService) UpdateOrganization(arg0 context.Context, arg1 influxdb.ID, arg2 influxdb.OrganizationUpdate) (*influxdb.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateOrganization", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateOrganization indicates an expected call of UpdateOrganization
func (mr *MockOrganizationServiceMockRecorder) UpdateOrganization(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).UpdateOrganization), arg0, arg1, arg2)
}

View File

@ -0,0 +1,51 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2/storage (interfaces: PointsWriter)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
models "github.com/influxdata/influxdb/v2/models"
)
// MockPointsWriter is a mock of PointsWriter interface
type MockPointsWriter struct {
ctrl *gomock.Controller
recorder *MockPointsWriterMockRecorder
}
// MockPointsWriterMockRecorder is the mock recorder for MockPointsWriter
type MockPointsWriterMockRecorder struct {
mock *MockPointsWriter
}
// NewMockPointsWriter creates a new mock instance
func NewMockPointsWriter(ctrl *gomock.Controller) *MockPointsWriter {
mock := &MockPointsWriter{ctrl: ctrl}
mock.recorder = &MockPointsWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockPointsWriter) EXPECT() *MockPointsWriterMockRecorder {
return m.recorder
}
// WritePoints mocks base method
func (m *MockPointsWriter) WritePoints(arg0 context.Context, arg1, arg2 influxdb.ID, arg3 []models.Point) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WritePoints", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WritePoints indicates an expected call of WritePoints
func (mr *MockPointsWriterMockRecorder) WritePoints(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePoints", reflect.TypeOf((*MockPointsWriter)(nil).WritePoints), arg0, arg1, arg2, arg3)
}

View File

@ -4,15 +4,17 @@ import (
"net/http"
"strings"
"github.com/influxdata/influxdb/v2/http/legacy"
"github.com/influxdata/influxdb/v2/kit/feature"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
)
// PlatformHandler is a collection of all the service handlers.
type PlatformHandler struct {
AssetHandler *AssetHandler
DocsHandler http.HandlerFunc
APIHandler http.Handler
AssetHandler *AssetHandler
DocsHandler http.HandlerFunc
APIHandler http.Handler
LegacyHandler http.Handler
}
// NewPlatformHandler returns a platform handler that serves the API and associated assets.
@ -37,15 +39,27 @@ func NewPlatformHandler(b *APIBackend, opts ...APIHandlerOptFn) *PlatformHandler
wrappedHandler := kithttp.SetCORS(h)
wrappedHandler = kithttp.SkipOptions(wrappedHandler)
legacyBackend := newLegacyBackend(b)
lh := newLegacyHandler(legacyBackend, legacy.HandlerConfig{})
return &PlatformHandler{
AssetHandler: assetHandler,
DocsHandler: Redoc("/api/v2/swagger.json"),
APIHandler: wrappedHandler,
AssetHandler: assetHandler,
DocsHandler: Redoc("/api/v2/swagger.json"),
APIHandler: wrappedHandler,
LegacyHandler: legacy.NewInflux1xAuthenticationHandler(lh, b.AuthorizationService, b.UserService, b.HTTPErrorHandler),
}
}
// ServeHTTP delegates a request to the appropriate subhandler.
func (h *PlatformHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// TODO(affo): change this to be mounted prefixes: https://github.com/influxdata/idpe/issues/6689.
if r.URL.Path == "/write" ||
r.URL.Path == "/query" ||
r.URL.Path == "/ping" {
h.LegacyHandler.ServeHTTP(w, r)
return
}
if strings.HasPrefix(r.URL.Path, "/docs") {
h.DocsHandler.ServeHTTP(w, r)
return

View File

@ -0,0 +1,25 @@
package points
import (
"compress/gzip"
"io"
io2 "github.com/influxdata/influxdb/v2/kit/io"
)
// BatchReadCloser (potentially) wraps an io.ReadCloser in Gzip
// decompression and limits the reading to a specific number of bytes.
// A maxBatchSizeBytes of zero or less disables the size limit; the
// limit applies to the bytes read after any decompression.
//
// NOTE(review): when encoding selects gzip, the returned ReadCloser
// closes the gzip reader rather than the original rc — callers appear
// to rely on the HTTP server closing the request body; confirm before
// reusing this helper outside an HTTP handler.
func BatchReadCloser(rc io.ReadCloser, encoding string, maxBatchSizeBytes int64) (io.ReadCloser, error) {
	switch encoding {
	case "gzip", "x-gzip":
		var err error
		rc, err = gzip.NewReader(rc)
		if err != nil {
			return nil, err
		}
	}
	if maxBatchSizeBytes > 0 {
		rc = io2.NewLimitedReadCloser(rc, maxBatchSizeBytes)
	}
	return rc, nil
}

View File

@ -0,0 +1,140 @@
package points
import (
"compress/gzip"
"context"
"errors"
"io"
"io/ioutil"
"time"
"github.com/influxdata/influxdb/v2"
io2 "github.com/influxdata/influxdb/v2/kit/io"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/models"
"github.com/opentracing/opentracing-go"
"go.uber.org/zap"
"istio.io/pkg/log"
)
var (
	// ErrMaxBatchSizeExceeded is returned when a points batch exceeds
	// the defined upper limit in bytes. This pertains to the size of the
	// batch after inflation from any compression (i.e. ungzipped).
	ErrMaxBatchSizeExceeded = errors.New("points batch is too large")
)

const (
	// opPointsWriter is the operation name attached to influxdb.Error
	// values produced by this package.
	opPointsWriter = "http/pointsWriter"

	// Client-facing error messages.
	msgUnableToReadData      = "unable to read data"
	msgWritingRequiresPoints = "writing requires points"
)
// ParsedPoints contains the points parsed as well as the total number of bytes
// after decompression.
type ParsedPoints struct {
	// Points holds the parsed line-protocol points.
	Points models.Points
	// RawSize is the request payload size in bytes after decompression.
	RawSize int
}
// Parser parses batches of Points.
type Parser struct {
	// Precision is the timestamp precision passed to
	// models.ParsePointsWithPrecision.
	Precision string
	//ParserOptions []models.ParserOption
}
// Parse parses the points from an io.ReadCloser for a specific Bucket.
// It opens a tracing span around the whole operation and delegates the
// work to parsePoints.
func (pw *Parser) Parse(ctx context.Context, orgID, bucketID influxdb.ID, rc io.ReadCloser) (*ParsedPoints, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "write points")
	defer span.Finish()
	return pw.parsePoints(ctx, orgID, bucketID, rc)
}
func (pw *Parser) parsePoints(ctx context.Context, orgID, bucketID influxdb.ID, rc io.ReadCloser) (*ParsedPoints, error) {
data, err := readAll(ctx, rc)
if err != nil {
code := influxdb.EInternal
if errors.Is(err, ErrMaxBatchSizeExceeded) {
code = influxdb.ETooLarge
} else if errors.Is(err, gzip.ErrHeader) || errors.Is(err, gzip.ErrChecksum) {
code = influxdb.EInvalid
}
return nil, &influxdb.Error{
Code: code,
Op: opPointsWriter,
Msg: msgUnableToReadData,
Err: err,
}
}
requestBytes := len(data)
if requestBytes == 0 {
return nil, &influxdb.Error{
Op: opPointsWriter,
Code: influxdb.EInvalid,
Msg: msgWritingRequiresPoints,
}
}
span, _ := tracing.StartSpanFromContextWithOperationName(ctx, "encoding and parsing")
points, err := models.ParsePointsWithPrecision(data, time.Now().UTC(), pw.Precision)
span.LogKV("values_total", len(points))
span.Finish()
if err != nil {
log.Error("Error parsing points", zap.Error(err))
code := influxdb.EInvalid
// TODO - backport these
// if errors.Is(err, models.ErrLimitMaxBytesExceeded) ||
// errors.Is(err, models.ErrLimitMaxLinesExceeded) ||
// errors.Is(err, models.ErrLimitMaxValuesExceeded) {
// code = influxdb.ETooLarge
// }
return nil, &influxdb.Error{
Code: code,
Op: opPointsWriter,
Msg: "",
Err: err,
}
}
return &ParsedPoints{
Points: points,
RawSize: requestBytes,
}, nil
}
// readAll drains rc and returns its contents, always closing rc. A close
// error is surfaced only when the read itself succeeded (so the deferred
// close cannot mask a read error), and io2.ErrReadLimitExceeded from the
// close is translated into ErrMaxBatchSizeExceeded.
func readAll(ctx context.Context, rc io.ReadCloser) (data []byte, err error) {
	defer func() {
		if cerr := rc.Close(); cerr != nil && err == nil {
			if errors.Is(cerr, io2.ErrReadLimitExceeded) {
				cerr = ErrMaxBatchSizeExceeded
			}
			err = cerr
		}
	}()

	span, _ := tracing.StartSpanFromContextWithOperationName(ctx, "read request body")
	defer func() {
		// Logged from the named result so the deferred span records the
		// final payload length.
		span.LogKV("request_bytes", len(data))
		span.Finish()
	}()

	data, err = ioutil.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	return data, nil
}
// NewParser returns a Parser configured with the given timestamp precision.
func NewParser(precision string /*parserOptions ...models.ParserOption*/) *Parser {
	p := &Parser{}
	p.Precision = precision
	//p.ParserOptions = parserOptions
	return p
}

View File

@ -18,6 +18,7 @@ import (
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/repl"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/jsonweb"
"github.com/influxdata/influxdb/v2/query"
@ -31,10 +32,11 @@ type QueryRequest struct {
Query string `json:"query"`
// Flux fields
Extern json.RawMessage `json:"extern,omitempty"`
AST json.RawMessage `json:"ast,omitempty"`
Dialect QueryDialect `json:"dialect"`
Now time.Time `json:"now"`
Extern *ast.File `json:"extern,omitempty"`
Spec *flux.Spec `json:"spec,omitempty"`
AST *ast.Package `json:"ast,omitempty"`
Dialect QueryDialect `json:"dialect"`
Now time.Time `json:"now"`
// InfluxQL fields
Bucket string `json:"bucket,omitempty"`
@ -269,13 +271,19 @@ func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, e
Query: r.Query,
}
}
} else if len(r.AST) > 0 {
} else if r.AST != nil {
c := lang.ASTCompiler{
Extern: r.Extern,
AST: r.AST,
Now: n,
AST: r.AST,
Now: n,
}
if r.Extern != nil {
c.PrependFile(r.Extern)
}
compiler = c
} else if r.Spec != nil {
compiler = repl.Compiler{
Spec: r.Spec,
}
}
delimiter, _ := utf8.DecodeRuneInString(r.Dialect.Delimiter)

View File

@ -245,7 +245,7 @@ func TestFluxHandler_postFluxAST(t *testing.T) {
name: "get ast from()",
w: httptest.NewRecorder(),
r: httptest.NewRequest("POST", "/api/v2/query/ast", bytes.NewBufferString(`{"query": "from()"}`)),
want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=rust","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}}
want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=go","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}}
`,
status: http.StatusOK,
},

View File

@ -3,7 +3,6 @@ package http
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"reflect"
@ -34,7 +33,7 @@ var cmpOptions = cmp.Options{
func TestQueryRequest_WithDefaults(t *testing.T) {
type fields struct {
Spec *flux.Spec
AST json.RawMessage
AST *ast.Package
Query string
Type string
Dialect QueryDialect
@ -60,6 +59,7 @@ func TestQueryRequest_WithDefaults(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := QueryRequest{
Spec: tt.fields.Spec,
AST: tt.fields.AST,
Query: tt.fields.Query,
Type: tt.fields.Type,
@ -75,8 +75,9 @@ func TestQueryRequest_WithDefaults(t *testing.T) {
func TestQueryRequest_Validate(t *testing.T) {
type fields struct {
Extern json.RawMessage
AST json.RawMessage
Extern *ast.File
Spec *flux.Spec
AST *ast.Package
Query string
Type string
Dialect QueryDialect
@ -94,6 +95,19 @@ func TestQueryRequest_Validate(t *testing.T) {
},
wantErr: true,
},
{
name: "query cannot have both extern and spec",
fields: fields{
Extern: &ast.File{},
Spec: &flux.Spec{},
Type: "flux",
Dialect: QueryDialect{
Delimiter: ",",
DateTimeFormat: "RFC3339",
},
},
wantErr: true,
},
{
name: "requires flux type",
fields: fields{
@ -175,6 +189,7 @@ func TestQueryRequest_Validate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
r := QueryRequest{
Extern: tt.fields.Extern,
Spec: tt.fields.Spec,
AST: tt.fields.AST,
Query: tt.fields.Query,
Type: tt.fields.Type,
@ -190,9 +205,9 @@ func TestQueryRequest_Validate(t *testing.T) {
func TestQueryRequest_proxyRequest(t *testing.T) {
type fields struct {
Extern json.RawMessage
Extern *ast.File
Spec *flux.Spec
AST json.RawMessage
AST *ast.Package
Query string
Type string
Dialect QueryDialect
@ -243,7 +258,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
{
name: "valid AST",
fields: fields{
AST: mustMarshal(&ast.Package{}),
AST: &ast.Package{},
Type: "flux",
Dialect: QueryDialect{
Delimiter: ",",
@ -256,7 +271,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
want: &query.ProxyRequest{
Request: query.Request{
Compiler: lang.ASTCompiler{
AST: mustMarshal(&ast.Package{}),
AST: &ast.Package{},
Now: time.Unix(1, 1),
},
},
@ -271,7 +286,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
{
name: "valid AST with calculated now",
fields: fields{
AST: mustMarshal(&ast.Package{}),
AST: &ast.Package{},
Type: "flux",
Dialect: QueryDialect{
Delimiter: ",",
@ -283,7 +298,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
want: &query.ProxyRequest{
Request: query.Request{
Compiler: lang.ASTCompiler{
AST: mustMarshal(&ast.Package{}),
AST: &ast.Package{},
Now: time.Unix(2, 2),
},
},
@ -298,7 +313,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
{
name: "valid AST with extern",
fields: fields{
Extern: mustMarshal(&ast.File{
Extern: &ast.File{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
@ -307,8 +322,8 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
},
},
},
}),
AST: mustMarshal(&ast.Package{}),
},
AST: &ast.Package{},
Type: "flux",
Dialect: QueryDialect{
Delimiter: ",",
@ -320,17 +335,20 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
want: &query.ProxyRequest{
Request: query.Request{
Compiler: lang.ASTCompiler{
Extern: mustMarshal(&ast.File{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "x"},
Init: &ast.IntegerLiteral{Value: 0},
AST: &ast.Package{
Files: []*ast.File{
{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "x"},
Init: &ast.IntegerLiteral{Value: 0},
},
},
},
},
},
}),
AST: mustMarshal(&ast.Package{}),
},
Now: time.Unix(1, 1),
},
},
@ -347,6 +365,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
r := QueryRequest{
Extern: tt.fields.Extern,
Spec: tt.fields.Spec,
AST: tt.fields.AST,
Query: tt.fields.Query,
Type: tt.fields.Type,
@ -366,14 +385,6 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
}
}
func mustMarshal(p ast.Node) []byte {
bs, err := json.Marshal(p)
if err != nil {
panic(err)
}
return bs
}
func Test_decodeQueryRequest(t *testing.T) {
type args struct {
ctx context.Context
@ -470,25 +481,6 @@ func Test_decodeQueryRequest(t *testing.T) {
}
func Test_decodeProxyQueryRequest(t *testing.T) {
externJSON := `{
"type": "File",
"body": [
{
"type": "OptionStatement",
"assignment": {
"type": "VariableAssignment",
"id": {
"type": "Identifier",
"name": "x"
},
"init": {
"type": "IntegerLiteral",
"value": "0"
}
}
}
]
}`
type args struct {
ctx context.Context
r *http.Request
@ -533,7 +525,25 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
args: args{
r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`
{
"extern": `+externJSON+`,
"extern": {
"type": "File",
"body": [
{
"type": "OptionStatement",
"assignment": {
"type": "VariableAssignment",
"id": {
"type": "Identifier",
"name": "x"
},
"init": {
"type": "IntegerLiteral",
"value": "0"
}
}
}
]
},
"query": "from(bucket: \"mybucket\")"
}
`)),
@ -549,8 +559,17 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
Request: query.Request{
OrganizationID: func() platform.ID { s, _ := platform.IDFromString("deadbeefdeadbeef"); return *s }(),
Compiler: lang.FluxCompiler{
Extern: []byte(externJSON),
Query: `from(bucket: "mybucket")`,
Extern: &ast.File{
Body: []ast.Statement{
&ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "x"},
Init: &ast.IntegerLiteral{Value: 0},
},
},
},
},
Query: `from(bucket: "mybucket")`,
},
},
Dialect: &csv.Dialect{
@ -610,59 +629,3 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
})
}
}
func TestProxyRequestToQueryRequest_Compilers(t *testing.T) {
tests := []struct {
name string
pr query.ProxyRequest
want QueryRequest
}{
{
name: "flux compiler copied",
pr: query.ProxyRequest{
Dialect: &query.NoContentDialect{},
Request: query.Request{
Compiler: lang.FluxCompiler{
Query: `howdy`,
Now: time.Unix(45, 45),
},
},
},
want: QueryRequest{
Type: "flux",
Query: `howdy`,
PreferNoContent: true,
Now: time.Unix(45, 45),
},
},
{
name: "AST compiler copied",
pr: query.ProxyRequest{
Dialect: &query.NoContentDialect{},
Request: query.Request{
Compiler: lang.ASTCompiler{
Now: time.Unix(45, 45),
AST: mustMarshal(&ast.Package{}),
},
},
},
want: QueryRequest{
Type: "flux",
PreferNoContent: true,
AST: mustMarshal(&ast.Package{}),
Now: time.Unix(45, 45),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := QueryRequestFromProxyRequest(&tt.pr)
if err != nil {
t.Error(err)
} else if !reflect.DeepEqual(*got, tt.want) {
t.Errorf("QueryRequestFromProxyRequest = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -3,32 +3,20 @@ package http
import (
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2"
pcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/http/metric"
kitio "github.com/influxdata/influxdb/v2/kit/io"
"github.com/influxdata/influxdb/v2/http/points"
"github.com/influxdata/influxdb/v2/kit/tracing"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/storage"
"github.com/influxdata/influxdb/v2/tsdb"
"github.com/opentracing/opentracing-go"
"go.uber.org/zap"
"istio.io/pkg/log"
)
var (
// ErrMaxBatchSizeExceeded is returned when a points batch exceeds
// the defined upper limit in bytes. This pertains to the size of the
// batch after inflation from any compression (i.e. ungzipped).
ErrMaxBatchSizeExceeded = errors.New("points batch is too large")
)
// WriteBackend is all services and associated parameters required to construct
@ -67,7 +55,7 @@ type WriteHandler struct {
router *httprouter.Router
log *zap.Logger
maxBatchSizeBytes int64
parserOptions []models.ParserOption
// parserOptions []models.ParserOption
}
// WriteHandlerOption is a functional option for a *WriteHandler
@ -81,11 +69,11 @@ func WithMaxBatchSizeBytes(n int64) WriteHandlerOption {
}
}
func WithParserOptions(opts ...models.ParserOption) WriteHandlerOption {
return func(w *WriteHandler) {
w.parserOptions = opts
}
}
//func WithParserOptions(opts ...models.ParserOption) WriteHandlerOption {
// return func(w *WriteHandler) {
// w.parserOptions = opts
// }
//}
// Prefix provides the route prefix.
func (*WriteHandler) Prefix() string {
@ -93,14 +81,10 @@ func (*WriteHandler) Prefix() string {
}
const (
prefixWrite = "/api/v2/write"
msgInvalidGzipHeader = "gzipped HTTP body contains an invalid header"
msgInvalidPrecision = "invalid precision; valid precision units are ns, us, ms, and s"
msgUnableToReadData = "unable to read data"
msgWritingRequiresPoints = "writing requires points"
msgUnexpectedWriteError = "unexpected error writing points to database"
prefixWrite = "/api/v2/write"
msgInvalidGzipHeader = "gzipped HTTP body contains an invalid header"
msgInvalidPrecision = "invalid precision; valid precision units are ns, us, ms, and s"
opPointsWriter = "http/pointsWriter"
opWriteHandler = "http/writeHandler"
)
@ -192,16 +176,17 @@ func (h *WriteHandler) handleWrite(w http.ResponseWriter, r *http.Request) {
return
}
opts := append([]models.ParserOption{}, h.parserOptions...)
opts = append(opts, models.WithParserPrecision(req.Precision))
parsed, err := NewPointsParser(opts...).ParsePoints(ctx, org.ID, bucket.ID, req.Body)
// TODO: Backport?
//opts := append([]models.ParserOption{}, h.parserOptions...)
//opts = append(opts, models.WithParserPrecision(req.Precision))
parsed, err := points.NewParser(req.Precision).Parse(ctx, org.ID, bucket.ID, req.Body)
if err != nil {
h.HandleHTTPError(ctx, err, sw)
return
}
requestBytes = parsed.RawSize
if err := h.PointsWriter.WritePoints(ctx, parsed.Points); err != nil {
if err := h.PointsWriter.WritePoints(ctx, org.ID, bucket.ID, parsed.Points); err != nil {
h.HandleHTTPError(ctx, &influxdb.Error{
Code: influxdb.EInternal,
Op: opWriteHandler,
@ -237,131 +222,6 @@ func checkBucketWritePermissions(auth influxdb.Authorizer, orgID, bucketID influ
return nil
}
// PointBatchReadCloser (potentially) wraps an io.ReadCloser in Gzip
// decompression and limits the reading to a specific number of bytes.
func PointBatchReadCloser(rc io.ReadCloser, encoding string, maxBatchSizeBytes int64) (io.ReadCloser, error) {
switch encoding {
case "gzip", "x-gzip":
var err error
rc, err = gzip.NewReader(rc)
if err != nil {
return nil, err
}
}
if maxBatchSizeBytes > 0 {
rc = kitio.NewLimitedReadCloser(rc, maxBatchSizeBytes)
}
return rc, nil
}
// NewPointsParser returns a new PointsParser
func NewPointsParser(parserOptions ...models.ParserOption) *PointsParser {
return &PointsParser{
ParserOptions: parserOptions,
}
}
// ParsedPoints contains the points parsed as well as the total number of bytes
// after decompression.
type ParsedPoints struct {
Points models.Points
RawSize int
}
// PointsParser parses batches of Points.
type PointsParser struct {
ParserOptions []models.ParserOption
}
// ParsePoints parses the points from an io.ReadCloser for a specific Bucket.
func (pw *PointsParser) ParsePoints(ctx context.Context, orgID, bucketID influxdb.ID, rc io.ReadCloser) (*ParsedPoints, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "write points")
defer span.Finish()
return pw.parsePoints(ctx, orgID, bucketID, rc)
}
func (pw *PointsParser) parsePoints(ctx context.Context, orgID, bucketID influxdb.ID, rc io.ReadCloser) (*ParsedPoints, error) {
data, err := readAll(ctx, rc)
if err != nil {
code := influxdb.EInternal
if errors.Is(err, ErrMaxBatchSizeExceeded) {
code = influxdb.ETooLarge
} else if errors.Is(err, gzip.ErrHeader) || errors.Is(err, gzip.ErrChecksum) {
code = influxdb.EInvalid
}
return nil, &influxdb.Error{
Code: code,
Op: opPointsWriter,
Msg: msgUnableToReadData,
Err: err,
}
}
requestBytes := len(data)
if requestBytes == 0 {
return nil, &influxdb.Error{
Op: opPointsWriter,
Code: influxdb.EInvalid,
Msg: msgWritingRequiresPoints,
}
}
span, _ := tracing.StartSpanFromContextWithOperationName(ctx, "encoding and parsing")
encoded := tsdb.EncodeName(orgID, bucketID)
mm := models.EscapeMeasurement(encoded[:])
points, err := models.ParsePointsWithOptions(data, mm, pw.ParserOptions...)
span.LogKV("values_total", len(points))
span.Finish()
if err != nil {
log.Error("Error parsing points", zap.Error(err))
code := influxdb.EInvalid
if errors.Is(err, models.ErrLimitMaxBytesExceeded) ||
errors.Is(err, models.ErrLimitMaxLinesExceeded) ||
errors.Is(err, models.ErrLimitMaxValuesExceeded) {
code = influxdb.ETooLarge
}
return nil, &influxdb.Error{
Code: code,
Op: opPointsWriter,
Msg: "",
Err: err,
}
}
return &ParsedPoints{
Points: points,
RawSize: requestBytes,
}, nil
}
func readAll(ctx context.Context, rc io.ReadCloser) (data []byte, err error) {
defer func() {
if cerr := rc.Close(); cerr != nil && err == nil {
if errors.Is(cerr, kitio.ErrReadLimitExceeded) {
cerr = ErrMaxBatchSizeExceeded
}
err = cerr
}
}()
span, _ := tracing.StartSpanFromContextWithOperationName(ctx, "read request body")
defer func() {
span.LogKV("request_bytes", len(data))
span.Finish()
}()
data, err = ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
return data, nil
}
// writeRequest is a request object holding information about a batch of points
// to be written to a Bucket.
type writeRequest struct {
@ -398,7 +258,7 @@ func decodeWriteRequest(ctx context.Context, r *http.Request, maxBatchSizeBytes
}
encoding := r.Header.Get("Content-Encoding")
body, err := PointBatchReadCloser(r.Body, encoding, maxBatchSizeBytes)
body, err := points.BatchReadCloser(r.Body, encoding, maxBatchSizeBytes)
if err != nil {
return nil, err
}

View File

@ -16,7 +16,6 @@ import (
httpmock "github.com/influxdata/influxdb/v2/http/mock"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/models"
influxtesting "github.com/influxdata/influxdb/v2/testing"
"go.uber.org/zap/zaptest"
)
@ -293,60 +292,6 @@ func TestWriteHandler_handleWrite(t *testing.T) {
body: `{"code":"request too large","message":"unable to read data: points batch is too large"}`,
},
},
{
name: "bytes limit rejected",
request: request{
org: "043e0780ee2b1000",
bucket: "04504b356e23b000",
body: "m1,t1=v1 f1=1",
auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"),
},
state: state{
org: testOrg("043e0780ee2b1000"),
bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"),
opts: []WriteHandlerOption{WithParserOptions(models.WithParserMaxBytes(5))},
},
wants: wants{
code: 413,
body: `{"code":"request too large","message":"points: number of allocated bytes exceeded"}`,
},
},
{
name: "lines limit rejected",
request: request{
org: "043e0780ee2b1000",
bucket: "04504b356e23b000",
body: "m1,t1=v1 f1=1\nm1,t1=v1 f1=1\nm1,t1=v1 f1=1\n",
auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"),
},
state: state{
org: testOrg("043e0780ee2b1000"),
bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"),
opts: []WriteHandlerOption{WithParserOptions(models.WithParserMaxLines(2))},
},
wants: wants{
code: 413,
body: `{"code":"request too large","message":"points: number of lines exceeded"}`,
},
},
{
name: "values limit rejected",
request: request{
org: "043e0780ee2b1000",
bucket: "04504b356e23b000",
body: "m1,t1=v1 f1=1,f2=2\nm1,t1=v1 f1=1,f2=2\nm1,t1=v1 f1=1,f2=2\n",
auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"),
},
state: state{
org: testOrg("043e0780ee2b1000"),
bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"),
opts: []WriteHandlerOption{WithParserOptions(models.WithParserMaxValues(4))},
},
wants: wants{
code: 413,
body: `{"code":"request too large","message":"points: number of values exceeded"}`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@ -0,0 +1,70 @@
package control
import (
"github.com/prometheus/client_golang/prometheus"
)
// ControllerMetrics holds metrics related to the query controller.
type ControllerMetrics struct {
	Requests          *prometheus.CounterVec   // count of query requests, labeled by result
	NotImplemented    *prometheus.CounterVec   // count of requests hitting unimplemented operations, labeled by operation
	RequestsLatency   *prometheus.HistogramVec // end-to-end request latency in seconds, labeled by result
	ExecutingDuration *prometheus.HistogramVec // time spent executing queries in seconds, labeled by result
}
// Values reported with the "result" metric label.
const (
	LabelSuccess        = "success"
	LabelGenericError   = "generic_err"
	LabelParseErr       = "parse_err"
	LabelInterruptedErr = "interrupt_err"
	LabelRuntimeError   = "runtime_error"
	LabelNotImplError   = "not_implemented"
	LabelNotExecuted    = "not_executed"
)
// NewControllerMetrics constructs a ControllerMetrics whose request, latency
// and execution metrics carry the provided base labels plus a trailing
// "result" label; the not-implemented counter is labeled by operation only.
func NewControllerMetrics(labels []string) *ControllerMetrics {
	const (
		namespace = "influxql"
		subsystem = "service"
	)

	// Build the labels+"result" set once on a fresh backing array instead of
	// appending at every metric definition.
	withResult := make([]string, 0, len(labels)+1)
	withResult = append(withResult, labels...)
	withResult = append(withResult, "result")

	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "requests_total",
		Help:      "Count of the query requests",
	}, withResult)

	notImplemented := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "not_implemented_total",
		Help:      "Count of the query requests executing unimplemented operations",
	}, []string{"operation"})

	requestsLatency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "requests_latency_seconds",
		Help:      "Histogram of times spent for end-to-end latency (from issuing query request, to receiving the first byte of the response)",
		Buckets:   prometheus.ExponentialBuckets(1e-3, 5, 7),
	}, withResult)

	executingDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "executing_duration_seconds",
		Help:      "Histogram of times spent executing queries",
		Buckets:   prometheus.ExponentialBuckets(1e-3, 5, 7),
	}, withResult)

	return &ControllerMetrics{
		Requests:          requests,
		NotImplemented:    notImplemented,
		RequestsLatency:   requestsLatency,
		ExecutingDuration: executingDuration,
	}
}
// PrometheusCollectors returns all collectors held by ControllerMetrics so
// callers can register them with a prometheus registry.
func (cm *ControllerMetrics) PrometheusCollectors() []prometheus.Collector {
	return []prometheus.Collector{
		cm.Requests,
		cm.NotImplemented,
		// RequestsLatency was previously omitted here, so the
		// requests_latency_seconds histogram was populated but never
		// registered or exported.
		cm.RequestsLatency,
		cm.ExecutingDuration,
	}
}

15
influxql/errors.go Normal file
View File

@ -0,0 +1,15 @@
package influxql
// NotImplementedError is returned when a specific operation is unavailable.
type NotImplementedError struct {
	Op string // Op is the name of the unimplemented operation
}

// Error implements the error interface.
func (e *NotImplementedError) Error() string {
	const prefix = "not implemented: "
	return prefix + e.Op
}

// ErrNotImplemented creates a NotImplementedError specifying op is unavailable.
func ErrNotImplemented(op string) error {
	err := NotImplementedError{Op: op}
	return &err
}

View File

@ -0,0 +1,24 @@
package mock
import (
"context"
"io"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/check"
)
// Compile-time check that the mock satisfies influxql.ProxyQueryService.
var _ influxql.ProxyQueryService = (*ProxyQueryService)(nil)

// ProxyQueryService mocks the InfluxQL QueryService for testing.
type ProxyQueryService struct {
	// QueryF is invoked by Query; tests assign it to control the mock's behavior.
	QueryF func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error)
}

// Query delegates to the configured QueryF hook.
func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
	return s.QueryF(ctx, w, req)
}

// Check always reports a passing health status for the mock service.
func (s *ProxyQueryService) Check(ctx context.Context) check.Response {
	return check.Response{Name: "Mock InfluxQL Proxy Query Service", Status: check.StatusPass}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

88
influxql/query/cast.go Normal file
View File

@ -0,0 +1,88 @@
package query
import "github.com/influxdata/influxql"
// castToType coerces the underlying interface value to the Go representation
// matching typ, returning v unchanged when no conversion applies.
func castToType(v interface{}, typ influxql.DataType) interface{} {
	switch typ {
	case influxql.Float:
		if val, ok := castToFloat(v); ok {
			return val
		}
	case influxql.Integer:
		if val, ok := castToInteger(v); ok {
			return val
		}
	case influxql.Unsigned:
		if val, ok := castToUnsigned(v); ok {
			return val
		}
	case influxql.String, influxql.Tag:
		if val, ok := castToString(v); ok {
			return val
		}
	case influxql.Boolean:
		if val, ok := castToBoolean(v); ok {
			return val
		}
	}
	return v
}
// castToFloat converts a numeric v (float64, int64 or uint64) to float64,
// reporting whether the conversion applied. Other types yield (0, false).
func castToFloat(v interface{}) (float64, bool) {
	switch n := v.(type) {
	case float64:
		return n, true
	case int64:
		return float64(n), true
	case uint64:
		return float64(n), true
	}
	return 0, false
}
// castToInteger converts a numeric v to int64 (floats are truncated),
// reporting whether the conversion applied. Other types yield (0, false).
func castToInteger(v interface{}) (int64, bool) {
	switch n := v.(type) {
	case float64:
		return int64(n), true
	case int64:
		return n, true
	case uint64:
		return int64(n), true
	}
	return 0, false
}
// castToUnsigned converts a numeric v to uint64 (signed values are converted
// with Go's usual wrap-around semantics), reporting whether the conversion
// applied. Other types yield (0, false).
func castToUnsigned(v interface{}) (uint64, bool) {
	switch n := v.(type) {
	case float64:
		return uint64(n), true
	case uint64:
		return n, true
	case int64:
		return uint64(n), true
	}
	return 0, false
}
// castToString returns v when it holds a string; otherwise ("", false).
func castToString(v interface{}) (string, bool) {
	s, ok := v.(string)
	return s, ok
}
// castToBoolean returns v when it holds a bool; otherwise (false, false).
func castToBoolean(v interface{}) (bool, bool) {
	b, ok := v.(bool)
	return b, ok
}

1206
influxql/query/compile.go Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,439 @@
package query_test
import (
"context"
"testing"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxql"
)
// TestCompile_Success verifies that every statement in the table below
// parses as a SELECT statement and is accepted by query.Compile with
// default compile options. Each statement runs as its own subtest named
// by the statement text.
func TestCompile_Success(t *testing.T) {
	for _, tt := range []string{
		`SELECT time, value FROM cpu`,
		`SELECT value FROM cpu`,
		`SELECT value, host FROM cpu`,
		`SELECT * FROM cpu`,
		`SELECT time, * FROM cpu`,
		`SELECT value, * FROM cpu`,
		`SELECT max(value) FROM cpu`,
		`SELECT max(value), host FROM cpu`,
		`SELECT max(value), * FROM cpu`,
		`SELECT max(*) FROM cpu`,
		`SELECT max(/val/) FROM cpu`,
		`SELECT min(value) FROM cpu`,
		`SELECT min(value), host FROM cpu`,
		`SELECT min(value), * FROM cpu`,
		`SELECT min(*) FROM cpu`,
		`SELECT min(/val/) FROM cpu`,
		`SELECT first(value) FROM cpu`,
		`SELECT first(value), host FROM cpu`,
		`SELECT first(value), * FROM cpu`,
		`SELECT first(*) FROM cpu`,
		`SELECT first(/val/) FROM cpu`,
		`SELECT last(value) FROM cpu`,
		`SELECT last(value), host FROM cpu`,
		`SELECT last(value), * FROM cpu`,
		`SELECT last(*) FROM cpu`,
		`SELECT last(/val/) FROM cpu`,
		`SELECT count(value) FROM cpu`,
		`SELECT count(distinct(value)) FROM cpu`,
		`SELECT count(distinct value) FROM cpu`,
		`SELECT count(*) FROM cpu`,
		`SELECT count(/val/) FROM cpu`,
		`SELECT mean(value) FROM cpu`,
		`SELECT mean(*) FROM cpu`,
		`SELECT mean(/val/) FROM cpu`,
		`SELECT min(value), max(value) FROM cpu`,
		`SELECT min(*), max(*) FROM cpu`,
		`SELECT min(/val/), max(/val/) FROM cpu`,
		`SELECT first(value), last(value) FROM cpu`,
		`SELECT first(*), last(*) FROM cpu`,
		`SELECT first(/val/), last(/val/) FROM cpu`,
		`SELECT count(value) FROM cpu WHERE time >= now() - 1h GROUP BY time(10m)`,
		`SELECT distinct value FROM cpu`,
		`SELECT distinct(value) FROM cpu`,
		`SELECT value / total FROM cpu`,
		`SELECT min(value) / total FROM cpu`,
		`SELECT max(value) / total FROM cpu`,
		`SELECT top(value, 1) FROM cpu`,
		`SELECT top(value, host, 1) FROM cpu`,
		`SELECT top(value, 1), host FROM cpu`,
		`SELECT min(top) FROM (SELECT top(value, host, 1) FROM cpu) GROUP BY region`,
		`SELECT bottom(value, 1) FROM cpu`,
		`SELECT bottom(value, host, 1) FROM cpu`,
		`SELECT bottom(value, 1), host FROM cpu`,
		`SELECT max(bottom) FROM (SELECT bottom(value, host, 1) FROM cpu) GROUP BY region`,
		`SELECT percentile(value, 75) FROM cpu`,
		`SELECT percentile(value, 75.0) FROM cpu`,
		`SELECT sample(value, 2) FROM cpu`,
		`SELECT sample(*, 2) FROM cpu`,
		`SELECT sample(/val/, 2) FROM cpu`,
		`SELECT elapsed(value) FROM cpu`,
		`SELECT elapsed(value, 10s) FROM cpu`,
		`SELECT integral(value) FROM cpu`,
		`SELECT integral(value, 10s) FROM cpu`,
		`SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, 5s)`,
		`SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, '2000-01-01T00:00:05Z')`,
		`SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, now())`,
		`SELECT max(mean) FROM (SELECT mean(value) FROM cpu GROUP BY host)`,
		`SELECT max(derivative) FROM (SELECT derivative(mean(value)) FROM cpu) WHERE time >= now() - 1m GROUP BY time(10s)`,
		`SELECT max(value) FROM (SELECT value + total FROM cpu) WHERE time >= now() - 1m GROUP BY time(10s)`,
		`SELECT value FROM cpu WHERE time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T01:00:00Z'`,
		`SELECT value FROM (SELECT value FROM cpu) ORDER BY time DESC`,
		`SELECT count(distinct(value)), max(value) FROM cpu`,
		`SELECT derivative(distinct(value)), difference(distinct(value)) FROM cpu WHERE time >= now() - 1m GROUP BY time(5s)`,
		`SELECT moving_average(distinct(value), 3) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`,
		`SELECT elapsed(distinct(value)) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`,
		`SELECT cumulative_sum(distinct(value)) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`,
		`SELECT last(value) / (1 - 0) FROM cpu`,
		`SELECT abs(value) FROM cpu`,
		`SELECT sin(value) FROM cpu`,
		`SELECT cos(value) FROM cpu`,
		`SELECT tan(value) FROM cpu`,
		`SELECT asin(value) FROM cpu`,
		`SELECT acos(value) FROM cpu`,
		`SELECT atan(value) FROM cpu`,
		`SELECT sqrt(value) FROM cpu`,
		`SELECT pow(value, 2) FROM cpu`,
		`SELECT pow(value, 3.14) FROM cpu`,
		`SELECT pow(2, value) FROM cpu`,
		`SELECT pow(3.14, value) FROM cpu`,
		`SELECT exp(value) FROM cpu`,
		`SELECT atan2(value, 0.1) FROM cpu`,
		`SELECT atan2(0.2, value) FROM cpu`,
		`SELECT atan2(value, 1) FROM cpu`,
		`SELECT atan2(2, value) FROM cpu`,
		`SELECT ln(value) FROM cpu`,
		`SELECT log(value, 2) FROM cpu`,
		`SELECT log2(value) FROM cpu`,
		`SELECT log10(value) FROM cpu`,
		`SELECT sin(value) - sin(1.3) FROM cpu`,
		`SELECT value FROM cpu WHERE sin(value) > 0.5`,
		`SELECT sum("out")/sum("in") FROM (SELECT derivative("out") AS "out", derivative("in") AS "in" FROM "m0" WHERE time >= now() - 5m GROUP BY "index") GROUP BY time(1m) fill(none)`,
	} {
		t.Run(tt, func(t *testing.T) {
			// Parse failures are fatal: a table entry that does not parse
			// cannot exercise the compiler at all.
			stmt, err := influxql.ParseStatement(tt)
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			s := stmt.(*influxql.SelectStatement)

			opt := query.CompileOptions{}
			if _, err := query.Compile(s, opt); err != nil {
				t.Errorf("unexpected error: %s", err)
			}
		})
	}
}
func TestCompile_Failures(t *testing.T) {
for _, tt := range []struct {
s string
err string
}{
{s: `SELECT time FROM cpu`, err: `at least 1 non-time field must be queried`},
{s: `SELECT value, mean(value) FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT value, max(value), min(value) FROM cpu`, err: `mixing multiple selector functions with tags or fields is not supported`},
{s: `SELECT top(value, 10), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`},
{s: `SELECT bottom(value, 10), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`},
{s: `SELECT count() FROM cpu`, err: `invalid number of arguments for count, expected 1, got 0`},
{s: `SELECT count(value, host) FROM cpu`, err: `invalid number of arguments for count, expected 1, got 2`},
{s: `SELECT min() FROM cpu`, err: `invalid number of arguments for min, expected 1, got 0`},
{s: `SELECT min(value, host) FROM cpu`, err: `invalid number of arguments for min, expected 1, got 2`},
{s: `SELECT max() FROM cpu`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT max(value, host) FROM cpu`, err: `invalid number of arguments for max, expected 1, got 2`},
{s: `SELECT sum() FROM cpu`, err: `invalid number of arguments for sum, expected 1, got 0`},
{s: `SELECT sum(value, host) FROM cpu`, err: `invalid number of arguments for sum, expected 1, got 2`},
{s: `SELECT first() FROM cpu`, err: `invalid number of arguments for first, expected 1, got 0`},
{s: `SELECT first(value, host) FROM cpu`, err: `invalid number of arguments for first, expected 1, got 2`},
{s: `SELECT last() FROM cpu`, err: `invalid number of arguments for last, expected 1, got 0`},
{s: `SELECT last(value, host) FROM cpu`, err: `invalid number of arguments for last, expected 1, got 2`},
{s: `SELECT mean() FROM cpu`, err: `invalid number of arguments for mean, expected 1, got 0`},
{s: `SELECT mean(value, host) FROM cpu`, err: `invalid number of arguments for mean, expected 1, got 2`},
{s: `SELECT distinct(value), max(value) FROM cpu`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT count(distinct()) FROM cpu`, err: `distinct function requires at least one argument`},
{s: `SELECT count(distinct(value, host)) FROM cpu`, err: `distinct function can only have one argument`},
{s: `SELECT count(distinct(2)) FROM cpu`, err: `expected field argument in distinct()`},
{s: `SELECT value FROM cpu GROUP BY now()`, err: `only time() calls allowed in dimensions`},
{s: `SELECT value FROM cpu GROUP BY time()`, err: `time dimension expected 1 or 2 arguments`},
{s: `SELECT value FROM cpu GROUP BY time(5m, 30s, 1ms)`, err: `time dimension expected 1 or 2 arguments`},
{s: `SELECT value FROM cpu GROUP BY time('unexpected')`, err: `time dimension must have duration argument`},
{s: `SELECT value FROM cpu GROUP BY time(5m), time(1m)`, err: `multiple time dimensions not allowed`},
{s: `SELECT value FROM cpu GROUP BY time(5m, unexpected())`, err: `time dimension offset function must be now()`},
{s: `SELECT value FROM cpu GROUP BY time(5m, now(1m))`, err: `time dimension offset now() function requires no arguments`},
{s: `SELECT value FROM cpu GROUP BY time(5m, 'unexpected')`, err: `time dimension offset must be duration or now()`},
{s: `SELECT value FROM cpu GROUP BY 'unexpected'`, err: `only time and tag dimensions allowed`},
{s: `SELECT top(value) FROM cpu`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT top('unexpected', 5) FROM cpu`, err: `expected first argument to be a field in top(), found 'unexpected'`},
{s: `SELECT top(value, 'unexpected', 5) FROM cpu`, err: `only fields or tags are allowed in top(), found 'unexpected'`},
{s: `SELECT top(value, 2.5) FROM cpu`, err: `expected integer as last argument in top(), found 2.500`},
{s: `SELECT top(value, -1) FROM cpu`, err: `limit (-1) in top function must be at least 1`},
{s: `SELECT top(value, 3) FROM cpu LIMIT 2`, err: `limit (3) in top function can not be larger than the LIMIT (2) in the select statement`},
{s: `SELECT bottom(value) FROM cpu`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT bottom('unexpected', 5) FROM cpu`, err: `expected first argument to be a field in bottom(), found 'unexpected'`},
{s: `SELECT bottom(value, 'unexpected', 5) FROM cpu`, err: `only fields or tags are allowed in bottom(), found 'unexpected'`},
{s: `SELECT bottom(value, 2.5) FROM cpu`, err: `expected integer as last argument in bottom(), found 2.500`},
{s: `SELECT bottom(value, -1) FROM cpu`, err: `limit (-1) in bottom function must be at least 1`},
{s: `SELECT bottom(value, 3) FROM cpu LIMIT 2`, err: `limit (3) in bottom function can not be larger than the LIMIT (2) in the select statement`},
// TODO(jsternberg): This query is wrong, but we cannot enforce this because of previous behavior: https://github.com/influxdata/influxdb/pull/8771
//{s: `SELECT value FROM cpu WHERE time >= now() - 10m OR time < now() - 5m`, err: `cannot use OR with time conditions`},
{s: `SELECT value FROM cpu WHERE value`, err: `invalid condition expression: value`},
{s: `SELECT count(value), * FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT max(*), host FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT count(value), /ho/ FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT max(/val/), * FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT a(value) FROM cpu`, err: `undefined function a()`},
{s: `SELECT count(max(value)) FROM myseries`, err: `expected field argument in count()`},
{s: `SELECT count(distinct('value')) FROM myseries`, err: `expected field argument in distinct()`},
{s: `SELECT distinct('value') FROM myseries`, err: `expected field argument in distinct()`},
{s: `SELECT min(max(value)) FROM myseries`, err: `expected field argument in min()`},
{s: `SELECT min(distinct(value)) FROM myseries`, err: `expected field argument in min()`},
{s: `SELECT max(max(value)) FROM myseries`, err: `expected field argument in max()`},
{s: `SELECT sum(max(value)) FROM myseries`, err: `expected field argument in sum()`},
{s: `SELECT first(max(value)) FROM myseries`, err: `expected field argument in first()`},
{s: `SELECT last(max(value)) FROM myseries`, err: `expected field argument in last()`},
{s: `SELECT mean(max(value)) FROM myseries`, err: `expected field argument in mean()`},
{s: `SELECT median(max(value)) FROM myseries`, err: `expected field argument in median()`},
{s: `SELECT mode(max(value)) FROM myseries`, err: `expected field argument in mode()`},
{s: `SELECT stddev(max(value)) FROM myseries`, err: `expected field argument in stddev()`},
{s: `SELECT spread(max(value)) FROM myseries`, err: `expected field argument in spread()`},
{s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`},
{s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`},
{s: `SELECT top(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`},
{s: `SELECT top(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5`},
{s: `SELECT top(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`},
{s: `SELECT top(value, 10) + count(value) FROM myseries`, err: `selector function top() cannot be combined with other functions`},
{s: `SELECT top(max(value), 10) FROM myseries`, err: `expected first argument to be a field in top(), found max(value)`},
{s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`},
{s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`},
{s: `SELECT bottom(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`},
{s: `SELECT bottom(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5`},
{s: `SELECT bottom(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`},
{s: `SELECT bottom(value, 10) + count(value) FROM myseries`, err: `selector function bottom() cannot be combined with other functions`},
{s: `SELECT bottom(max(value), 10) FROM myseries`, err: `expected first argument to be a field in bottom(), found max(value)`},
{s: `SELECT top(value, 10), bottom(value, 10) FROM cpu`, err: `selector function top() cannot be combined with other functions`},
{s: `SELECT bottom(value, 10), top(value, 10) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`},
{s: `SELECT sample(value) FROM myseries`, err: `invalid number of arguments for sample, expected 2, got 1`},
{s: `SELECT sample(value, 2, 3) FROM myseries`, err: `invalid number of arguments for sample, expected 2, got 3`},
{s: `SELECT sample(value, 0) FROM myseries`, err: `sample window must be greater than 1, got 0`},
{s: `SELECT sample(value, 2.5) FROM myseries`, err: `expected integer argument in sample()`},
{s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`},
{s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`},
{s: `SELECT percentile(max(field1), 75) FROM myseries`, err: `expected field argument in percentile()`},
{s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`},
{s: `SELECT field1 FROM foo fill(none)`, err: `fill(none) must be used with a function`},
{s: `SELECT field1 FROM foo fill(linear)`, err: `fill(linear) must be used with a function`},
{s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`},
{s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`},
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected 1 or 2 arguments`},
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have duration argument`},
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`},
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, b)`, err: `time dimension offset must be duration or now()`},
{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, '5s')`, err: `time dimension offset must be duration or now()`},
{s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`},
{s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`},
{s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT count(distinct field1, field2) FROM myseries`, err: `invalid number of arguments for count, expected 1, got 2`},
{s: `select count(distinct(too, many, arguments)) from myseries`, err: `distinct function can only have one argument`},
{s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`},
{s: `SELECT derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`},
{s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`},
{s: `SELECT derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to derivative`},
{s: `SELECT derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`},
{s: `SELECT derivative(value, -2h) FROM myseries`, err: `duration argument must be positive, got -2h`},
{s: `SELECT derivative(value, 10) FROM myseries`, err: `second argument to derivative must be a duration, got *influxql.IntegerLiteral`},
{s: `SELECT derivative(f, true) FROM myseries`, err: `second argument to derivative must be a duration, got *influxql.BooleanLiteral`},
{s: `SELECT non_negative_derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`},
{s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`},
{s: `SELECT non_negative_derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_derivative`},
{s: `SELECT non_negative_derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT non_negative_derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT non_negative_derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT non_negative_derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_derivative aggregate requires a GROUP BY interval`},
{s: `SELECT non_negative_derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT non_negative_derivative(value, -2h) FROM myseries`, err: `duration argument must be positive, got -2h`},
{s: `SELECT non_negative_derivative(value, 10) FROM myseries`, err: `second argument to non_negative_derivative must be a duration, got *influxql.IntegerLiteral`},
{s: `SELECT difference(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT difference() from myseries`, err: `invalid number of arguments for difference, expected 1, got 0`},
{s: `SELECT difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to difference`},
{s: `SELECT difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `difference aggregate requires a GROUP BY interval`},
{s: `SELECT non_negative_difference(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT non_negative_difference() from myseries`, err: `invalid number of arguments for non_negative_difference, expected 1, got 0`},
{s: `SELECT non_negative_difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_difference`},
{s: `SELECT non_negative_difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT non_negative_difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT non_negative_difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT non_negative_difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT non_negative_difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_difference aggregate requires a GROUP BY interval`},
{s: `SELECT elapsed() FROM myseries`, err: `invalid number of arguments for elapsed, expected at least 1 but no more than 2, got 0`},
{s: `SELECT elapsed(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to elapsed`},
{s: `SELECT elapsed(value, 1s, host) FROM myseries`, err: `invalid number of arguments for elapsed, expected at least 1 but no more than 2, got 3`},
{s: `SELECT elapsed(value, 0s) FROM myseries`, err: `duration argument must be positive, got 0s`},
{s: `SELECT elapsed(value, -10s) FROM myseries`, err: `duration argument must be positive, got -10s`},
{s: `SELECT elapsed(value, 10) FROM myseries`, err: `second argument to elapsed must be a duration, got *influxql.IntegerLiteral`},
{s: `SELECT elapsed(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT elapsed(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT elapsed(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT elapsed(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT elapsed(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `elapsed aggregate requires a GROUP BY interval`},
{s: `SELECT moving_average(field1, 2), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT moving_average(field1, 1), field1 FROM myseries`, err: `moving_average window must be greater than 1, got 1`},
{s: `SELECT moving_average(field1, 0), field1 FROM myseries`, err: `moving_average window must be greater than 1, got 0`},
{s: `SELECT moving_average(field1, -1), field1 FROM myseries`, err: `moving_average window must be greater than 1, got -1`},
{s: `SELECT moving_average(field1, 2.0), field1 FROM myseries`, err: `second argument for moving_average must be an integer, got *influxql.NumberLiteral`},
{s: `SELECT moving_average() from myseries`, err: `invalid number of arguments for moving_average, expected 2, got 0`},
{s: `SELECT moving_average(value) FROM myseries`, err: `invalid number of arguments for moving_average, expected 2, got 1`},
{s: `SELECT moving_average(value, 2) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to moving_average`},
{s: `SELECT moving_average(top(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT moving_average(bottom(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT moving_average(max(), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT moving_average(percentile(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT moving_average(mean(value), 2) FROM myseries where time < now() and time > now() - 1d`, err: `moving_average aggregate requires a GROUP BY interval`},
{s: `SELECT cumulative_sum(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},
{s: `SELECT cumulative_sum() from myseries`, err: `invalid number of arguments for cumulative_sum, expected 1, got 0`},
{s: `SELECT cumulative_sum(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to cumulative_sum`},
{s: `SELECT cumulative_sum(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},
{s: `SELECT cumulative_sum(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},
{s: `SELECT cumulative_sum(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},
{s: `SELECT cumulative_sum(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},
{s: `SELECT cumulative_sum(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `cumulative_sum aggregate requires a GROUP BY interval`},
{s: `SELECT integral() FROM myseries`, err: `invalid number of arguments for integral, expected at least 1 but no more than 2, got 0`},
{s: `SELECT integral(value, 10s, host) FROM myseries`, err: `invalid number of arguments for integral, expected at least 1 but no more than 2, got 3`},
{s: `SELECT integral(value, -10s) FROM myseries`, err: `duration argument must be positive, got -10s`},
{s: `SELECT integral(value, 10) FROM myseries`, err: `second argument must be a duration`},
{s: `SELECT holt_winters(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters, expected 3, got 1`},
{s: `SELECT holt_winters(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters`},
{s: `SELECT holt_winters(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters aggregate requires a GROUP BY interval`},
{s: `SELECT holt_winters(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters must be greater than 0, got 0`},
{s: `SELECT holt_winters(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters`},
{s: `SELECT holt_winters(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters`},
{s: `SELECT holt_winters(min(value), 10, -1) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `third arg to holt_winters cannot be negative, got -1`},
{s: `SELECT holt_winters_with_fit(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters_with_fit, expected 3, got 1`},
{s: `SELECT holt_winters_with_fit(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters_with_fit`},
{s: `SELECT holt_winters_with_fit(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters_with_fit aggregate requires a GROUP BY interval`},
{s: `SELECT holt_winters_with_fit(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters_with_fit must be greater than 0, got 0`},
{s: `SELECT holt_winters_with_fit(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters_with_fit`},
{s: `SELECT holt_winters_with_fit(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters_with_fit`},
{s: `SELECT holt_winters_with_fit(min(value), 10, -1) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `third arg to holt_winters_with_fit cannot be negative, got -1`},
{s: `SELECT mean(value) + value FROM cpu WHERE time < now() and time > now() - 1h GROUP BY time(10m)`, err: `mixing aggregate and non-aggregate queries is not supported`},
// TODO: Remove this restriction in the future: https://github.com/influxdata/influxdb/issues/5968
{s: `SELECT mean(cpu_total - cpu_idle) FROM cpu`, err: `expected field argument in mean()`},
{s: `SELECT derivative(mean(cpu_total - cpu_idle), 1s) FROM cpu WHERE time < now() AND time > now() - 1d GROUP BY time(1h)`, err: `expected field argument in mean()`},
// TODO: The error message will change when math is allowed inside an aggregate: https://github.com/influxdata/influxdb/pull/5990#issuecomment-195565870
{s: `SELECT count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`},
{s: `SELECT (count(foo + sum(bar))) FROM cpu`, err: `expected field argument in count()`},
{s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`},
{s: `SELECT top(value, 2), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`},
{s: `SELECT bottom(value, 2), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`},
{s: `SELECT min(derivative) FROM (SELECT derivative(mean(value), 1h) FROM myseries) where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`},
{s: `SELECT min(mean) FROM (SELECT mean(value) FROM myseries GROUP BY time)`, err: `time() is a function and expects at least one argument`},
{s: `SELECT value FROM myseries WHERE value OR time >= now() - 1m`, err: `invalid condition expression: value`},
{s: `SELECT value FROM myseries WHERE time >= now() - 1m OR value`, err: `invalid condition expression: value`},
{s: `SELECT value FROM (SELECT value FROM cpu ORDER BY time DESC) ORDER BY time ASC`, err: `subqueries must be ordered in the same direction as the query itself`},
{s: `SELECT sin(value, 3) FROM cpu`, err: `invalid number of arguments for sin, expected 1, got 2`},
{s: `SELECT cos(2.3, value, 3) FROM cpu`, err: `invalid number of arguments for cos, expected 1, got 3`},
{s: `SELECT tan(value, 3) FROM cpu`, err: `invalid number of arguments for tan, expected 1, got 2`},
{s: `SELECT asin(value, 3) FROM cpu`, err: `invalid number of arguments for asin, expected 1, got 2`},
{s: `SELECT acos(value, 3.2) FROM cpu`, err: `invalid number of arguments for acos, expected 1, got 2`},
{s: `SELECT atan() FROM cpu`, err: `invalid number of arguments for atan, expected 1, got 0`},
{s: `SELECT sqrt(42, 3, 4) FROM cpu`, err: `invalid number of arguments for sqrt, expected 1, got 3`},
{s: `SELECT abs(value, 3) FROM cpu`, err: `invalid number of arguments for abs, expected 1, got 2`},
{s: `SELECT ln(value, 3) FROM cpu`, err: `invalid number of arguments for ln, expected 1, got 2`},
{s: `SELECT log2(value, 3) FROM cpu`, err: `invalid number of arguments for log2, expected 1, got 2`},
{s: `SELECT log10(value, 3) FROM cpu`, err: `invalid number of arguments for log10, expected 1, got 2`},
{s: `SELECT pow(value, 3, 3) FROM cpu`, err: `invalid number of arguments for pow, expected 2, got 3`},
{s: `SELECT atan2(value, 3, 3) FROM cpu`, err: `invalid number of arguments for atan2, expected 2, got 3`},
{s: `SELECT sin(1.3) FROM cpu`, err: `field must contain at least one variable`},
{s: `SELECT nofunc(1.3) FROM cpu`, err: `undefined function nofunc()`},
} {
t.Run(tt.s, func(t *testing.T) {
stmt, err := influxql.ParseStatement(tt.s)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
s := stmt.(*influxql.SelectStatement)
opt := query.CompileOptions{}
if _, err := query.Compile(s, opt); err == nil {
t.Error("expected error")
} else if have, want := err.Error(), tt.err; have != want {
t.Errorf("unexpected error: %s != %s", have, want)
}
})
}
}
// TestPrepare_MapShardsTimeRange verifies that preparing a compiled
// statement maps shards over the time range implied by the query's WHERE
// clause, widened backwards where a transformation such as derivative or
// moving_average needs extra preceding intervals.
func TestPrepare_MapShardsTimeRange(t *testing.T) {
	cases := []struct {
		s          string
		start, end string
	}{
		{
			s:     `SELECT max(value) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`,
			start: "2018-09-03T15:00:00Z",
			end:   "2018-09-03T16:00:00Z",
		},
		{
			s:     `SELECT derivative(mean(value)) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`,
			start: "2018-09-03T14:50:00Z",
			end:   "2018-09-03T16:00:00Z",
		},
		{
			s:     `SELECT moving_average(mean(value), 3) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`,
			start: "2018-09-03T14:30:00Z",
			end:   "2018-09-03T16:00:00Z",
		},
		{
			s:     `SELECT moving_average(mean(value), 3) FROM cpu WHERE time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`,
			start: "1677-09-21T00:12:43.145224194Z",
			end:   "2018-09-03T16:00:00Z",
		},
	}
	for _, tc := range cases {
		t.Run(tc.s, func(t *testing.T) {
			stmt, err := influxql.ParseStatement(tc.s)
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			sel := stmt.(*influxql.SelectStatement)
			c, err := query.Compile(sel, query.CompileOptions{})
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			// The shard mapper records the time range it was asked to map
			// so the test can compare it against the expected bounds.
			shardMapper := ShardMapper{
				MapShardsFn: func(_ context.Context, _ influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
					if got, want := tr.Min, mustParseTime(tc.start); !got.Equal(want) {
						t.Errorf("unexpected start time: got=%s want=%s", got, want)
					}
					if got, want := tr.Max, mustParseTime(tc.end); !got.Equal(want) {
						t.Errorf("unexpected end time: got=%s want=%s", got, want)
					}
					return &ShardGroup{}
				},
			}
			if _, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{}); err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
		})
	}
}

447
influxql/query/cursor.go Normal file
View File

@ -0,0 +1,447 @@
package query
import (
"math"
"time"
"github.com/influxdata/influxql"
)
// NullFloat is a typed nil *float64 boxed in an interface{}. Scan uses it
// to stand in for NaN results so they serialize as a null float rather
// than being mistaken for a missing value that needs to be filled.
var NullFloat interface{} = (*float64)(nil)
// Series represents the metadata about a series.
type Series struct {
	// Name is the measurement name.
	Name string
	// Tags for the series.
	Tags Tags
	// id is an internal id used to easily compare if a series is the
	// same as another series. Whenever the internal cursor changes
	// to a new series, this id gets incremented. It is not exposed to
	// the user so we can implement this in whatever way we want.
	// If a series is not generated by a cursor, this id is zero and
	// comparisons fall back to the name and tags instead.
	id uint64
}
// SameSeries reports whether this series and other are part of the same
// series in the response. It does not necessarily check full equality of
// the name and tags: when both sides carry a non-zero internal cursor id,
// only the ids are compared.
func (s Series) SameSeries(other Series) bool {
	if s.id == 0 || other.id == 0 {
		// At least one side was not produced by a cursor; compare the
		// observable identity instead.
		return s.Name == other.Name && s.Tags.ID() == other.Tags.ID()
	}
	return s.id == other.id
}
// Equal checks to see if the Series are identical.
func (s Series) Equal(other Series) bool {
	// Matching non-zero ids short-circuit the comparison: the series came
	// from the same cursor position. Differing ids are inconclusive —
	// identical series may be generated by different cursors — so fall
	// through to comparing name and tags.
	if s.id != 0 && other.id != 0 && s.id == other.id {
		return true
	}
	return s.Name == other.Name && s.Tags.ID() == other.Tags.ID()
}
// Row represents a single row returned by the query engine.
type Row struct {
	// Time is the timestamp for this row. If the cursor was created to
	// return time as one of the values, the time will also be included as
	// a time.Time in the appropriate column within Values.
	// This ensures that time is always present in the Row structure
	// even if it hasn't been requested in the output.
	Time int64
	// Series contains the series metadata for this row.
	Series Series
	// Values contains the values within the current row.
	Values []interface{}
}
// Cursor is the interface for iterating over the rows produced by the
// query engine.
type Cursor interface {
	// Scan will retrieve the next row and assign the result to
	// the passed in Row. If the Row has not been initialized, the Cursor
	// will initialize the Row.
	// To increase speed and memory usage, the same Row can be used and
	// the previous values will be overwritten while using the same memory.
	Scan(row *Row) bool
	// Stats returns the IteratorStats from the underlying iterators.
	Stats() IteratorStats
	// Err returns any errors that were encountered from scanning the rows.
	Err() error
	// Columns returns the column names and types.
	Columns() []influxql.VarRef
	// Close closes the underlying resources that the cursor is using.
	Close() error
}
// RowCursor returns a Cursor that iterates over the given in-memory rows
// with the given column metadata.
func RowCursor(rows []Row, columns []influxql.VarRef) Cursor {
	cur := rowCursor{rows: rows, columns: columns}
	return &cur
}
// rowCursor is a Cursor over a fixed, in-memory slice of rows.
type rowCursor struct {
	rows    []Row
	columns []influxql.VarRef
	// series tracks the most recently scanned series so consecutive rows
	// from the same series can be detected during Scan.
	series Series
}
// Scan copies the next pending row into row, bumping the internal series
// id whenever the series changes from the previous row. It reports false
// once all rows have been consumed.
func (cur *rowCursor) Scan(row *Row) bool {
	if len(cur.rows) == 0 {
		return false
	}
	next := cur.rows[0]
	cur.rows = cur.rows[1:]
	*row = next
	sameSeries := row.Series.Name == cur.series.Name && row.Series.Tags.Equals(&cur.series.Tags)
	if !sameSeries {
		cur.series.Name = row.Series.Name
		cur.series.Tags = row.Series.Tags
		cur.series.id++
	}
	return true
}
// Stats returns empty statistics; a rowCursor has no underlying iterators.
func (cur *rowCursor) Stats() IteratorStats {
	var stats IteratorStats
	return stats
}
// Err always returns nil; scanning in-memory rows cannot fail.
func (cur *rowCursor) Err() error {
	return nil
}
// Columns returns the column names and types for the rows.
func (cur *rowCursor) Columns() []influxql.VarRef {
	return cur.columns
}
// Close is a no-op; a rowCursor holds no external resources.
func (cur *rowCursor) Close() error {
	return nil
}
// scannerFunc populates m with the field values for the next point and
// returns that point's timestamp, measurement name, and tags.
type scannerFunc func(m map[string]interface{}) (int64, string, Tags)
// scannerCursorBase holds the state shared by scanner-backed cursors: the
// compiled output expressions, the value map that points are scanned into,
// and the valuer used to evaluate those expressions against the map.
type scannerCursorBase struct {
	// fields are the expressions evaluated to produce each output column.
	fields []influxql.Expr
	// m is the shared map the scan function fills with field values.
	m map[string]interface{}
	// series is the most recently scanned series; its internal id is
	// incremented on every series change.
	series Series
	// columns holds the output column names and types.
	columns []influxql.VarRef
	// loc is the time location used when rendering the "time" column.
	loc *time.Location
	// scan retrieves the next point's values into m.
	scan scannerFunc
	// valuer evaluates the field expressions against m.
	valuer influxql.ValuerEval
}
// newScannerCursorBase constructs the shared state for scanner-backed
// cursors: it compiles the output columns from the given fields, defaults
// the location to UTC, and wires a valuer over the shared value map.
func newScannerCursorBase(scan scannerFunc, fields []*influxql.Field, loc *time.Location) scannerCursorBase {
	if loc == nil {
		loc = time.UTC
	}
	typmap := FunctionTypeMapper{}
	exprs := make([]influxql.Expr, len(fields))
	columns := make([]influxql.VarRef, len(fields))
	for i, f := range fields {
		expr := f.Expr
		exprs[i] = expr
		columns[i] = influxql.VarRef{
			Val:  f.Name(),
			Type: influxql.EvalType(expr, nil, typmap),
		}
	}
	// The same map instance is shared between the scan function (which
	// fills it) and the valuer (which reads from it).
	m := make(map[string]interface{})
	return scannerCursorBase{
		fields:  exprs,
		m:       m,
		columns: columns,
		loc:     loc,
		scan:    scan,
		valuer: influxql.ValuerEval{
			Valuer: influxql.MultiValuer(
				MathValuer{},
				influxql.MapValuer(m),
			),
			IntegerFloatDivision: true,
		},
	}
}
// Scan advances the underlying scanner(s), evaluates the projection fields
// against the scanned values, and writes the result into row. It reports
// false when the input is exhausted (scan returned ZeroTime).
func (cur *scannerCursorBase) Scan(row *Row) bool {
	ts, name, tags := cur.scan(cur.m)
	if ts == ZeroTime {
		return false
	}

	row.Time = ts
	if name != cur.series.Name || tags.ID() != cur.series.Tags.ID() {
		cur.series.Name = name
		cur.series.Tags = tags
		cur.series.id++
	}
	row.Series = cur.series

	// Reuse row.Values when it is already large enough to hold all columns.
	if len(cur.columns) > len(row.Values) {
		row.Values = make([]interface{}, len(cur.columns))
	}

	for i, expr := range cur.fields {
		// A special case if the field is time to reduce memory allocations.
		if ref, ok := expr.(*influxql.VarRef); ok && ref.Val == "time" {
			row.Values[i] = time.Unix(0, row.Time).In(cur.loc)
			continue
		}
		v := cur.valuer.Eval(expr)
		if fv, ok := v.(float64); ok && math.IsNaN(fv) {
			// If the float value is NaN, convert it to a null float
			// so this can be serialized correctly, but not mistaken for
			// a null value that needs to be filled.
			v = NullFloat
		}
		row.Values[i] = v
	}
	return true
}

// Columns returns the output column metadata computed at construction.
func (cur *scannerCursorBase) Columns() []influxql.VarRef {
	return cur.columns
}

// clear removes every entry from m in place, preserving the map's storage.
// Called when the cursor crosses a series boundary so stale field values do
// not leak into the next series.
func (cur *scannerCursorBase) clear(m map[string]interface{}) {
	for k := range m {
		delete(m, k)
	}
}
// Compile-time check that scannerCursor satisfies Cursor.
var _ Cursor = (*scannerCursor)(nil)

// scannerCursor adapts a single IteratorScanner to the Cursor interface.
type scannerCursor struct {
	scanner IteratorScanner
	scannerCursorBase
}

// newScannerCursor wraps s in a cursor that projects the given fields.
func newScannerCursor(s IteratorScanner, fields []*influxql.Field, opt IteratorOptions) *scannerCursor {
	cur := &scannerCursor{scanner: s}
	cur.scannerCursorBase = newScannerCursorBase(cur.scan, fields, opt.Location)
	return cur
}

// scan peeks at the scanner's next position, resets m on a series change,
// and then materializes the values at that position into m.
func (s *scannerCursor) scan(m map[string]interface{}) (int64, string, Tags) {
	ts, name, tags := s.scanner.Peek()
	// if a new series, clear the map of previous values
	if name != s.series.Name || tags.ID() != s.series.Tags.ID() {
		s.clear(m)
	}
	if ts == ZeroTime {
		return ts, name, tags
	}
	s.scanner.ScanAt(ts, name, tags, m)
	return ts, name, tags
}

// Stats delegates to the underlying scanner.
func (cur *scannerCursor) Stats() IteratorStats {
	return cur.scanner.Stats()
}

// Err delegates to the underlying scanner.
func (cur *scannerCursor) Err() error {
	return cur.scanner.Err()
}

// Close delegates to the underlying scanner.
func (cur *scannerCursor) Close() error {
	return cur.scanner.Close()
}
// Compile-time check that multiScannerCursor satisfies Cursor.
var _ Cursor = (*multiScannerCursor)(nil)

// multiScannerCursor merges several IteratorScanners into a single cursor,
// always advancing to the next position in (name, tags, time) order —
// ascending or descending per the iterator options.
type multiScannerCursor struct {
	scanners  []IteratorScanner
	err       error // first scanner error observed; reported via Err
	ascending bool
	scannerCursorBase
}

// newMultiScannerCursor merges scanners into a cursor projecting fields.
func newMultiScannerCursor(scanners []IteratorScanner, fields []*influxql.Field, opt IteratorOptions) *multiScannerCursor {
	cur := &multiScannerCursor{
		scanners:  scanners,
		ascending: opt.Ascending,
	}
	cur.scannerCursorBase = newScannerCursorBase(cur.scan, fields, opt.Location)
	return cur
}

// scan selects the minimum (ascending) or maximum (descending) position
// across all scanners by comparing name, then tag ID, then timestamp, and
// scans every scanner at that position so the values merge into m.
// A scanner error aborts the scan immediately and is surfaced via Err.
func (cur *multiScannerCursor) scan(m map[string]interface{}) (ts int64, name string, tags Tags) {
	ts = ZeroTime
	for _, s := range cur.scanners {
		curTime, curName, curTags := s.Peek()
		if curTime == ZeroTime {
			if err := s.Err(); err != nil {
				cur.err = err
				return ZeroTime, "", Tags{}
			}
			// Scanner is exhausted without error; ignore it.
			continue
		}

		if ts == ZeroTime {
			// First non-exhausted scanner seeds the candidate position.
			ts, name, tags = curTime, curName, curTags
			continue
		}

		if cur.ascending {
			if (curName < name) || (curName == name && curTags.ID() < tags.ID()) || (curName == name && curTags.ID() == tags.ID() && curTime < ts) {
				ts, name, tags = curTime, curName, curTags
			}
			continue
		}

		if (curName > name) || (curName == name && curTags.ID() > tags.ID()) || (curName == name && curTags.ID() == tags.ID() && curTime > ts) {
			ts, name, tags = curTime, curName, curTags
		}
	}

	if ts == ZeroTime {
		// Every scanner was exhausted.
		return ts, name, tags
	}

	// if a new series, clear the map of previous values
	if name != cur.series.Name || tags.ID() != cur.series.Tags.ID() {
		cur.clear(m)
	}
	for _, s := range cur.scanners {
		s.ScanAt(ts, name, tags, m)
	}
	return ts, name, tags
}

// Stats sums the statistics of all underlying scanners.
func (cur *multiScannerCursor) Stats() IteratorStats {
	var stats IteratorStats
	for _, s := range cur.scanners {
		stats.Add(s.Stats())
	}
	return stats
}

// Err returns the first scanner error observed during scanning, if any.
func (cur *multiScannerCursor) Err() error {
	return cur.err
}

// Close closes every scanner and returns the first close error encountered.
func (cur *multiScannerCursor) Close() error {
	var err error
	for _, s := range cur.scanners {
		if e := s.Close(); e != nil && err == nil {
			err = e
		}
	}
	return err
}
// filterCursor wraps another Cursor and only yields rows for which the
// filter expression evaluates to true.
type filterCursor struct {
	Cursor
	// fields holds the mapping of field names to the index in the row
	// based off of the column metadata. This only contains the fields
	// we need and will exclude the ones we do not.
	fields map[string]IteratorMap
	filter influxql.Expr
	m      map[string]interface{} // scratch map fed to valuer for each row
	valuer influxql.ValuerEval
}

// newFilterCursor builds a filterCursor over cur, resolving every name
// referenced by filter to either a row column (FieldMap) or, failing that,
// a tag lookup (TagMap).
func newFilterCursor(cur Cursor, filter influxql.Expr) *filterCursor {
	fields := make(map[string]IteratorMap)
	for _, name := range influxql.ExprNames(filter) {
		for i, col := range cur.Columns() {
			if name.Val == col.Val {
				fields[name.Val] = FieldMap{
					Index: i,
					Type:  name.Type,
				}
				break
			}
		}

		// If the field is not a column, assume it is a tag value.
		// We do not know what the tag values will be, but there really
		// isn't any different between NullMap and a TagMap that's pointed
		// at the wrong location for the purposes described here.
		if _, ok := fields[name.Val]; !ok {
			fields[name.Val] = TagMap(name.Val)
		}
	}
	m := make(map[string]interface{})
	return &filterCursor{
		Cursor: cur,
		fields: fields,
		filter: filter,
		m:      m,
		valuer: influxql.ValuerEval{Valuer: influxql.MapValuer(m)},
	}
}
// Scan keeps pulling rows from the wrapped cursor until one satisfies the
// filter expression, writing it into row. It reports false when the wrapped
// cursor is exhausted before a matching row is found.
func (cur *filterCursor) Scan(row *Row) bool {
	for {
		if !cur.Cursor.Scan(row) {
			// Underlying cursor exhausted without a match.
			return false
		}

		// Use the field mappings to prepare the map for the valuer.
		for name, f := range cur.fields {
			cur.m[name] = f.Value(row)
		}

		if cur.valuer.EvalBool(cur.filter) {
			// Row passes the filter; stop searching.
			return true
		}
	}
}
// nullCursor is a Cursor that yields no rows but still reports the column
// metadata derived from the projection fields.
type nullCursor struct {
	columns []influxql.VarRef
}

// newNullCursor creates a cursor with the field names as columns and no rows.
func newNullCursor(fields []*influxql.Field) *nullCursor {
	columns := make([]influxql.VarRef, len(fields))
	for i, f := range fields {
		columns[i].Val = f.Name()
	}
	return &nullCursor{columns: columns}
}

// Scan always reports false; the cursor is empty.
func (cur *nullCursor) Scan(row *Row) bool {
	return false
}

// Stats returns zero statistics.
func (cur *nullCursor) Stats() IteratorStats {
	return IteratorStats{}
}

// Err always returns nil.
func (cur *nullCursor) Err() error {
	return nil
}

// Columns returns the column metadata computed at construction.
func (cur *nullCursor) Columns() []influxql.VarRef {
	return cur.columns
}

// Close is a no-op.
func (cur *nullCursor) Close() error {
	return nil
}
// DrainCursor will read and discard all values from a Cursor and return the error
// if one happens.
func DrainCursor(cur Cursor) error {
	row := Row{}
	for cur.Scan(&row) {
		// Discard the scanned row; only the cursor's error state matters.
	}
	return cur.Err()
}

81
influxql/query/emitter.go Normal file
View File

@ -0,0 +1,81 @@
package query
import (
"github.com/influxdata/influxdb/v2/models"
)
// Emitter reads from a cursor into rows.
type Emitter struct {
	cur       Cursor
	chunkSize int // max values per emitted row; <= 0 means unlimited

	series  Series      // series of the row currently being accumulated
	row     *models.Row // partially built row, flushed by Emit
	columns []string    // column names copied from the cursor
}

// NewEmitter returns a new instance of Emitter that pulls from itrs.
func NewEmitter(cur Cursor, chunkSize int) *Emitter {
	columns := make([]string, len(cur.Columns()))
	for i, col := range cur.Columns() {
		columns[i] = col.Val
	}
	return &Emitter{
		cur:       cur,
		chunkSize: chunkSize,
		columns:   columns,
	}
}

// Close closes the underlying iterators.
func (e *Emitter) Close() error {
	return e.cur.Close()
}
// Emit returns the next row from the iterators. The second return value is
// true while more rows may follow; the final call returns the buffered row
// (possibly nil) with false.
func (e *Emitter) Emit() (*models.Row, bool, error) {
	// Continually read from the cursor until it is exhausted.
	for {
		// Scan the next row. If there are no rows left, return the current row.
		var row Row
		if !e.cur.Scan(&row) {
			if err := e.cur.Err(); err != nil {
				return nil, false, err
			}
			// Flush whatever has accumulated (nil if nothing was read).
			r := e.row
			e.row = nil
			return r, false, nil
		}

		// If there's no row yet then create one.
		// If the name and tags match the existing row, append to that row if
		// the number of values doesn't exceed the chunk size.
		// Otherwise return existing row and add values to next emitted row.
		if e.row == nil {
			e.createRow(row.Series, row.Values)
		} else if e.series.SameSeries(row.Series) {
			if e.chunkSize > 0 && len(e.row.Values) >= e.chunkSize {
				// Chunk is full: mark it partial, start the next chunk with
				// the value just scanned, and emit the full chunk.
				r := e.row
				r.Partial = true
				e.createRow(row.Series, row.Values)
				return r, true, nil
			}
			e.row.Values = append(e.row.Values, row.Values)
		} else {
			// Series changed: emit the finished row and start a new one.
			r := e.row
			e.createRow(row.Series, row.Values)
			return r, true, nil
		}
	}
}
// createRow creates a new row attached to the emitter.
func (e *Emitter) createRow(series Series, values []interface{}) {
	e.series = series
	r := &models.Row{
		Name:    series.Name,
		Tags:    series.Tags.KeyValues(),
		Columns: e.columns,
	}
	r.Values = [][]interface{}{values}
	e.row = r
}

View File

@ -0,0 +1,34 @@
package query
import (
"context"
iql "github.com/influxdata/influxdb/v2/influxql"
)
// ExecutionContext contains state that the query is currently executing with.
type ExecutionContext struct {
	// The statement ID of the executing query.
	statementID int

	// Output channel where results and errors should be sent.
	Results chan *Result

	// StatisticsGatherer gathers metrics about the execution of a query.
	StatisticsGatherer *iql.StatisticsGatherer

	// Options used to start this query.
	ExecutionOptions
}

// Send sends a Result to the Results channel and will exit if the query has
// been interrupted or aborted. The result's StatementID is stamped with the
// context's current statement before sending.
func (ectx *ExecutionContext) Send(ctx context.Context, result *Result) error {
	result.StatementID = ectx.statementID
	select {
	case <-ctx.Done():
		return ctx.Err()
	case ectx.Results <- result:
	}
	return nil
}

366
influxql/query/executor.go Normal file
View File

@ -0,0 +1,366 @@
package query
import (
"context"
"errors"
"fmt"
"os"
"runtime/debug"
"strconv"
"time"
"github.com/influxdata/influxdb/v2"
iql "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/control"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxql"
"github.com/opentracing/opentracing-go/log"
"go.uber.org/zap"
)
var (
	// ErrInvalidQuery is returned when executing an unknown query type.
	ErrInvalidQuery = errors.New("invalid query")

	// ErrNotExecuted is returned when a statement is not executed in a query.
	// This can occur when a previous statement in the same query has errored.
	ErrNotExecuted = errors.New("not executed")

	// ErrQueryInterrupted is an error returned when the query is interrupted.
	ErrQueryInterrupted = errors.New("query interrupted")
)

const (
	// PanicCrashEnv is the environment variable that, when set, will prevent
	// the handler from recovering any panics.
	PanicCrashEnv = "INFLUXDB_PANIC_CRASH"
)
// ErrDatabaseNotFound returns a database not found error for the given database name.
func ErrDatabaseNotFound(name string) error {
	return errors.New("database not found: " + name)
}
// ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points.
// NOTE: the message previously misspelled "exceeded" as "exceeed"; any external
// matchers keyed on the old text must be updated.
func ErrMaxSelectPointsLimitExceeded(n, limit int) error {
	return fmt.Errorf("max-select-point limit exceeded: (%d/%d)", n, limit)
}
// ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run
// because the maximum number of queries has been reached.
func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error {
	// Message text (including the missing space before the parenthesis) is
	// preserved byte-for-byte for compatibility with existing matchers.
	const format = "max-concurrent-queries limit exceeded(%d, %d)"
	return fmt.Errorf(format, n, limit)
}
// Authorizer determines if certain operations are authorized.
type Authorizer interface {
	// AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name.
	AuthorizeDatabase(p influxql.Privilege, name string) bool

	// AuthorizeQuery returns an error if the query cannot be executed
	AuthorizeQuery(database string, query *influxql.Query) error

	// AuthorizeSeriesRead determines if a series is authorized for reading
	AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool

	// AuthorizeSeriesWrite determines if a series is authorized for writing
	AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool
}

// OpenAuthorizer is the Authorizer used when authorization is disabled.
// It allows all operations.
type openAuthorizer struct{}

// OpenAuthorizer can be shared by all goroutines.
var OpenAuthorizer = openAuthorizer{}
// AuthorizeDatabase returns true to allow any operation on a database.
func (a openAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true }

// AuthorizeSeriesRead allows access to any series.
func (a openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool {
	return true
}

// AuthorizeSeriesWrite allows access to any series.
func (a openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool {
	return true
}

// AuthorizeQuery allows any query to execute.
func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil }
// AuthorizerIsOpen returns true if the provided Authorizer is guaranteed to
// authorize anything. A nil Authorizer returns true for this function, and this
// function should be preferred over directly checking if an Authorizer is nil
// or not.
func AuthorizerIsOpen(a Authorizer) bool {
	// An authorizer may opt in to being treated as unrestricted by
	// implementing AuthorizeUnrestricted; that answer takes precedence.
	if u, ok := a.(interface{ AuthorizeUnrestricted() bool }); ok {
		return u.AuthorizeUnrestricted()
	}
	return a == nil || a == OpenAuthorizer
}
// ExecutionOptions contains the options for executing a query.
type ExecutionOptions struct {
	// OrgID is the organization for which this query is being executed.
	OrgID influxdb.ID

	// The database the query is running against.
	Database string

	// The retention policy the query is running against.
	RetentionPolicy string

	// How to determine whether the query is allowed to execute,
	// what resources can be returned in SHOW queries, etc.
	Authorizer Authorizer

	// The requested maximum number of points to return in each result.
	ChunkSize int

	// If this query is being executed in a read-only context.
	ReadOnly bool

	// Node to execute on.
	NodeID uint64

	// Quiet suppresses non-essential output from the query executor.
	Quiet bool
}

// iteratorsContextKey is the private context key for the Iterators value.
type (
	iteratorsContextKey struct{}
)

// NewContextWithIterators returns a new context.Context with the *Iterators slice added.
// The query planner will add instances of AuxIterator to the Iterators slice.
func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Context {
	return context.WithValue(ctx, iteratorsContextKey{}, itr)
}

// StatementExecutor executes a statement within the Executor.
type StatementExecutor interface {
	// ExecuteStatement executes a statement. Results should be sent to the
	// results channel in the ExecutionContext.
	ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *ExecutionContext) error
}
// StatementNormalizer normalizes a statement before it is executed.
type StatementNormalizer interface {
	// NormalizeStatement adds a default database and policy to the
	// measurements in the statement.
	NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error
}

var (
	// nullNormalizer is the default normalizer; it leaves statements untouched.
	nullNormalizer StatementNormalizer = &nullNormalizerImpl{}
)

// nullNormalizerImpl is a StatementNormalizer that performs no normalization.
type nullNormalizerImpl struct{}

// NormalizeStatement is a no-op.
func (n *nullNormalizerImpl) NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error {
	return nil
}
// Executor executes every statement in an Query.
type Executor struct {
	// Used for executing a statement in the query.
	StatementExecutor StatementExecutor

	// StatementNormalizer normalizes a statement before it is executed.
	StatementNormalizer StatementNormalizer

	// Metrics collects request counts and execution durations.
	Metrics *control.ControllerMetrics

	log *zap.Logger
}

// NewExecutor returns a new instance of Executor.
// StatementExecutor must be assigned by the caller before use.
func NewExecutor(logger *zap.Logger, cm *control.ControllerMetrics) *Executor {
	return &Executor{
		StatementNormalizer: nullNormalizer,
		Metrics:             cm,
		log:                 logger.With(zap.String("service", "query")),
	}
}

// Close kills all running queries and prevents new queries from being attached.
func (e *Executor) Close() error {
	return nil
}

// ExecuteQuery executes each statement within a query. Results are delivered
// asynchronously on the returned channel; the Statistics value is populated
// as statements complete and must not be read until the channel is closed.
func (e *Executor) ExecuteQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions) (<-chan *Result, *iql.Statistics) {
	results := make(chan *Result)
	statistics := new(iql.Statistics)
	go e.executeQuery(ctx, query, opt, results, statistics)
	return results, statistics
}
// executeQuery runs every statement of query in order, sending a Result per
// statement on results and accumulating per-statement statistics. It closes
// results when done and recovers panics raised by statement execution.
func (e *Executor) executeQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions, results chan *Result, statistics *iql.Statistics) {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer func() {
		close(results)
		span.Finish()
	}()

	defer e.recover(query, results)

	gatherer := new(iql.StatisticsGatherer)
	statusLabel := control.LabelSuccess
	// Observe total execution time with the final status label on exit.
	defer func(start time.Time) {
		dur := time.Since(start)
		e.Metrics.ExecutingDuration.WithLabelValues(statusLabel).Observe(dur.Seconds())
	}(time.Now())

	ectx := &ExecutionContext{StatisticsGatherer: gatherer, ExecutionOptions: opt}

	// Setup the execution context that will be used when executing statements.
	ectx.Results = results

	var i int
LOOP:
	for ; i < len(query.Statements); i++ {
		ectx.statementID = i
		stmt := query.Statements[i]

		// If a default database wasn't passed in by the caller, check the statement.
		defaultDB := opt.Database
		if defaultDB == "" {
			if s, ok := stmt.(influxql.HasDefaultDatabase); ok {
				defaultDB = s.DefaultDatabase()
			}
		}

		// Do not let queries manually use the system measurements. If we find
		// one, return an error. This prevents a person from using the
		// measurement incorrectly and causing a panic.
		if stmt, ok := stmt.(*influxql.SelectStatement); ok {
			for _, s := range stmt.Sources {
				switch s := s.(type) {
				case *influxql.Measurement:
					if influxql.IsSystemName(s.Name) {
						command := "the appropriate meta command"
						switch s.Name {
						case "_fieldKeys":
							command = "SHOW FIELD KEYS"
						case "_measurements":
							command = "SHOW MEASUREMENTS"
						case "_series":
							command = "SHOW SERIES"
						case "_tagKeys":
							command = "SHOW TAG KEYS"
						case "_tags":
							command = "SHOW TAG VALUES"
						}
						_ = ectx.Send(ctx, &Result{
							Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command),
						})
						break LOOP
					}
				}
			}
		}

		// Rewrite statements, if necessary.
		// This can occur on meta read statements which convert to SELECT statements.
		newStmt, err := RewriteStatement(stmt)
		if err != nil {
			_ = ectx.Send(ctx, &Result{Err: err})
			break
		}
		stmt = newStmt

		if err := e.StatementNormalizer.NormalizeStatement(ctx, stmt, defaultDB, opt.RetentionPolicy, ectx); err != nil {
			if err := ectx.Send(ctx, &Result{Err: err}); err != nil {
				return
			}
			break
		}

		statistics.StatementCount += 1

		// Log each normalized statement.
		if !ectx.Quiet {
			e.log.Info("Executing query", zap.Stringer("query", stmt))
			span.LogFields(log.String("normalized_query", stmt.String()))
		}

		gatherer.Reset()
		stmtStart := time.Now()
		// Send any other statements to the underlying statement executor.
		err = tracing.LogError(span, e.StatementExecutor.ExecuteStatement(ctx, stmt, ectx))
		stmtDur := time.Since(stmtStart)
		stmtStats := gatherer.Statistics()
		// ExecuteDuration excludes time the planner accounted for separately.
		stmtStats.ExecuteDuration = stmtDur - stmtStats.PlanDuration
		statistics.Add(stmtStats)

		// Send an error for this result if it failed for some reason.
		if err != nil {
			statusLabel = control.LabelNotExecuted
			e.Metrics.Requests.WithLabelValues(statusLabel).Inc()
			_ = ectx.Send(ctx, &Result{
				StatementID: i,
				Err:         err,
			})
			// Stop after the first error.
			break
		}

		e.Metrics.Requests.WithLabelValues(statusLabel).Inc()

		// Check if the query was interrupted during an uninterruptible statement.
		interrupted := false
		select {
		case <-ctx.Done():
			interrupted = true
		default:
			// Query has not been interrupted.
		}

		if interrupted {
			statusLabel = control.LabelInterruptedErr
			e.Metrics.Requests.WithLabelValues(statusLabel).Inc()
			break
		}
	}

	// Send error results for any statements which were not executed.
	// NOTE(review): when the loop above breaks at statement i (which has
	// already received a result), this resends StatementID i and never
	// reports the final statement. Verify whether the intended range is
	// i+1 .. len(query.Statements)-1; behavior kept as-is for compatibility.
	for ; i < len(query.Statements)-1; i++ {
		if err := ectx.Send(ctx, &Result{
			StatementID: i,
			Err:         ErrNotExecuted,
		}); err != nil {
			break
		}
	}
}
// Determines if the Executor will recover any panics or let them crash
// the server.
var willCrash bool

func init() {
	// willCrash defaults to false unless PanicCrashEnv parses as a true value.
	var err error
	if willCrash, err = strconv.ParseBool(os.Getenv(PanicCrashEnv)); err != nil {
		willCrash = false
	}
}

// recover converts a panic during statement execution into an error Result
// (StatementID -1). When willCrash is set it additionally dumps all
// goroutine stacks and exits the process.
func (e *Executor) recover(query *influxql.Query, results chan *Result) {
	if err := recover(); err != nil {
		e.log.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack()))
		results <- &Result{
			StatementID: -1,
			Err:         fmt.Errorf("%s [panic:%s]", query.String(), err),
		}

		if willCrash {
			e.log.Error("\n\n=====\nAll goroutines now follow:")
			buf := debug.Stack()
			e.log.Error(fmt.Sprintf("%s", buf))
			os.Exit(1)
		}
	}
}

View File

@ -0,0 +1,199 @@
package query_test
import (
"context"
"errors"
"testing"
"time"
"github.com/golang/mock/gomock"
iql "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/control"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/influxql/query/mocks"
"github.com/influxdata/influxql"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
)
// errUnexpected is a sentinel used by tests to signal a code path that
// should never be reached.
var errUnexpected = errors.New("unexpected error")

// StatementExecutor is a test double whose behavior is supplied per-test
// through ExecuteStatementFn.
type StatementExecutor struct {
	ExecuteStatementFn func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error
}

// ExecuteStatement delegates to the configured function.
func (e *StatementExecutor) ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
	return e.ExecuteStatementFn(ctx, stmt, ectx)
}

// NewQueryExecutor constructs an Executor wired to a test logger and fresh
// controller metrics.
func NewQueryExecutor(t *testing.T) *query.Executor {
	return query.NewExecutor(zaptest.NewLogger(t), control.NewControllerMetrics([]string{}))
}
// TestQueryExecutor_Interrupt verifies that cancelling the context unblocks
// a statement that is waiting on ctx.Done.
func TestQueryExecutor_Interrupt(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	e := NewQueryExecutor(t)
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
			select {
			case <-ctx.Done():
				return nil
			case <-time.After(100 * time.Millisecond):
				t.Error("killing the query did not close the channel after 100 milliseconds")
				return errUnexpected
			}
		},
	}

	ctx, cancel := context.WithCancel(context.Background())
	results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{})
	cancel()

	result := <-results
	if result != nil && result.Err != query.ErrQueryInterrupted {
		t.Errorf("unexpected error: %s", result.Err)
	}
}

// TestQueryExecutor_Abort verifies that Send fails once the context has
// already been cancelled before the statement runs.
func TestQueryExecutor_Abort(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	// ch1 gates statement execution; ch2 signals the statement finished.
	ch1 := make(chan struct{})
	ch2 := make(chan struct{})

	e := NewQueryExecutor(t)
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
			<-ch1

			if err := ectx.Send(ctx, &query.Result{Err: errUnexpected}); err == nil {
				t.Errorf("expected error")
			}
			close(ch2)
			return nil
		},
	}

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{})
	close(ch1)

	<-ch2
	discardOutput(results)
}

// TestQueryExecutor_Panic verifies a panicking statement is converted into
// an error result rather than crashing the executor.
func TestQueryExecutor_Panic(t *testing.T) {
	q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
	if err != nil {
		t.Fatal(err)
	}

	e := NewQueryExecutor(t)
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
			panic("test error")
		},
	}

	results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{})
	result := <-results
	if len(result.Series) != 0 {
		t.Errorf("expected %d rows, got %d", 0, len(result.Series))
	}
	if result.Err == nil || result.Err.Error() != "SELECT count(value) FROM cpu [panic:test error]" {
		t.Errorf("unexpected error: %s", result.Err)
	}
}
// TestQueryExecutor_InvalidSource verifies that selecting directly from
// system measurements is rejected with a hint pointing at the meta command,
// and that the statement executor is never invoked.
func TestQueryExecutor_InvalidSource(t *testing.T) {
	e := NewQueryExecutor(t)
	e.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
			return errors.New("statement executed unexpectedly")
		},
	}

	for i, tt := range []struct {
		q   string
		err string
	}{
		{
			q:   `SELECT fieldKey, fieldType FROM _fieldKeys`,
			err: `unable to use system source '_fieldKeys': use SHOW FIELD KEYS instead`,
		},
		{
			q:   `SELECT "name" FROM _measurements`,
			err: `unable to use system source '_measurements': use SHOW MEASUREMENTS instead`,
		},
		{
			q:   `SELECT "key" FROM _series`,
			err: `unable to use system source '_series': use SHOW SERIES instead`,
		},
		{
			q:   `SELECT tagKey FROM _tagKeys`,
			err: `unable to use system source '_tagKeys': use SHOW TAG KEYS instead`,
		},
		{
			q:   `SELECT "key", value FROM _tags`,
			err: `unable to use system source '_tags': use SHOW TAG VALUES instead`,
		},
	} {
		q, err := influxql.ParseQuery(tt.q)
		if err != nil {
			t.Errorf("%d. unable to parse: %s", i, tt.q)
			continue
		}

		results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{})
		result := <-results
		if len(result.Series) != 0 {
			// Fixed argument order: the case index comes first, then the
			// expected row count (previously passed as "0, i", producing
			// garbled failure messages).
			t.Errorf("%d. expected %d rows, got %d", i, 0, len(result.Series))
		}
		if result.Err == nil || result.Err.Error() != tt.err {
			t.Errorf("%d. unexpected error: %s", i, result.Err)
		}
	}
}
// This test verifies Statistics are gathered
// and that ExecuteDuration accounts for PlanDuration
func TestExecutor_ExecuteQuery_Statistics(t *testing.T) {
	ctl := gomock.NewController(t)
	defer ctl.Finish()

	// Two copies of the same statement so aggregation across statements
	// can be asserted (2 × 5ms plan time → 10ms total).
	stmt := influxql.MustParseStatement("SELECT f0 FROM m0")
	q := &influxql.Query{Statements: influxql.Statements{stmt, stmt}}

	se := mocks.NewMockStatementExecutor(ctl)
	se.EXPECT().ExecuteStatement(gomock.Any(), stmt, gomock.Any()).
		Times(2).
		DoAndReturn(func(ctx context.Context, statement influxql.Statement, ectx *query.ExecutionContext) error {
			time.Sleep(10 * time.Millisecond)
			ectx.StatisticsGatherer.Append(iql.NewImmutableCollector(iql.Statistics{PlanDuration: 5 * time.Millisecond}))
			return nil
		})

	e := NewQueryExecutor(t)
	e.StatementExecutor = se

	ctx := context.Background()
	results, stats := e.ExecuteQuery(ctx, q, query.ExecutionOptions{Quiet: true})
	<-results
	assert.GreaterOrEqual(t, int64(stats.ExecuteDuration), int64(10*time.Millisecond))
	assert.Equal(t, 10*time.Millisecond, stats.PlanDuration)
	assert.Equal(t, 2, stats.StatementCount)
}

// discardOutput drains a result channel so the executor goroutine can exit.
func discardOutput(results <-chan *query.Result) {
	for range results {
		// Read all results and discard.
	}
}

86
influxql/query/explain.go Normal file
View File

@ -0,0 +1,86 @@
package query
import (
"bytes"
"context"
"fmt"
"io"
"strings"
"github.com/influxdata/influxql"
)
// Explain renders an EXPLAIN-style plan: it temporarily swaps the iterator
// creator for a recording wrapper, runs Select to collect per-node costs,
// and formats one section per created iterator.
func (p *preparedStatement) Explain(ctx context.Context) (string, error) {
	// Determine the cost of all iterators created as part of this plan.
	ic := &explainIteratorCreator{ic: p.ic}
	p.ic = ic
	cur, err := p.Select(ctx)
	// Restore the original iterator creator before handling errors.
	p.ic = ic.ic

	if err != nil {
		return "", err
	}
	cur.Close()

	var buf bytes.Buffer
	for i, node := range ic.nodes {
		if i > 0 {
			buf.WriteString("\n")
		}

		expr := "<nil>"
		if node.Expr != nil {
			expr = node.Expr.String()
		}
		fmt.Fprintf(&buf, "EXPRESSION: %s\n", expr)
		if len(node.Aux) != 0 {
			refs := make([]string, len(node.Aux))
			for i, ref := range node.Aux {
				refs[i] = ref.String()
			}
			fmt.Fprintf(&buf, "AUXILIARY FIELDS: %s\n", strings.Join(refs, ", "))
		}
		fmt.Fprintf(&buf, "NUMBER OF SHARDS: %d\n", node.Cost.NumShards)
		fmt.Fprintf(&buf, "NUMBER OF SERIES: %d\n", node.Cost.NumSeries)
		fmt.Fprintf(&buf, "CACHED VALUES: %d\n", node.Cost.CachedValues)
		fmt.Fprintf(&buf, "NUMBER OF FILES: %d\n", node.Cost.NumFiles)
		fmt.Fprintf(&buf, "NUMBER OF BLOCKS: %d\n", node.Cost.BlocksRead)
		fmt.Fprintf(&buf, "SIZE OF BLOCKS: %d\n", node.Cost.BlockSize)
	}
	return buf.String(), nil
}
// planNode records the expression, auxiliary fields, and estimated cost of
// one iterator created during planning.
type planNode struct {
	Expr influxql.Expr
	Aux  []influxql.VarRef
	Cost IteratorCost
}

// explainIteratorCreator wraps an IteratorCreator, recording the cost of
// every requested iterator instead of building real ones.
type explainIteratorCreator struct {
	ic interface {
		IteratorCreator
		io.Closer
	}
	nodes []planNode
}

// CreateIterator records the cost of the requested iterator and returns a
// no-op iterator so planning proceeds without touching storage.
func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (Iterator, error) {
	cost, err := e.ic.IteratorCost(ctx, m, opt)
	if err != nil {
		return nil, err
	}
	e.nodes = append(e.nodes, planNode{
		Expr: opt.Expr,
		Aux:  opt.Aux,
		Cost: cost,
	})
	return &nilFloatIterator{}, nil
}

// IteratorCost delegates to the wrapped creator.
func (e *explainIteratorCreator) IteratorCost(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) {
	return e.ic.IteratorCost(ctx, m, opt)
}

// Close closes the wrapped creator.
func (e *explainIteratorCreator) Close() error {
	return e.ic.Close()
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,219 @@
package query
import (
"sort"
"time"
"math/rand"
)
{{with $types := .}}{{range $k := $types}}

// {{$k.Name}}PointAggregator aggregates points to produce a single point.
type {{$k.Name}}PointAggregator interface {
	Aggregate{{$k.Name}}(p *{{$k.Name}}Point)
}

// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time.
type {{$k.Name}}BulkPointAggregator interface {
	Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point)
}

// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an
// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will
// use the AggregateBulk method.
func Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) {
	switch a := a.(type) {
	case {{$k.Name}}BulkPointAggregator:
		a.Aggregate{{$k.Name}}Bulk(points)
	default:
		// Fall back to aggregating one point at a time.
		for _, p := range points {
			a.Aggregate{{$k.Name}}(&p)
		}
	}
}

// {{$k.Name}}PointEmitter produces a single point from an aggregate.
type {{$k.Name}}PointEmitter interface {
	Emit() []{{$k.Name}}Point
}
{{range $v := $types}}

// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer.
type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{})

// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces
// the passed in points to a single point using a reduce function.
type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct {
	prev *{{$v.Name}}Point
	fn   {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func
}

// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{$v.Name}}Reducer.
func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer {
	return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev}
}

// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the
// current and new point to modify the current point.
func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	t, v, aux := r.fn(r.prev, p)
	if r.prev == nil {
		r.prev = &{{$v.Name}}Point{}
	}
	r.prev.Time = t
	r.prev.Value = v
	r.prev.Aux = aux
	// Carry forward the count of source points folded into this aggregate.
	if p.Aggregated > 1 {
		r.prev.Aggregated += p.Aggregated
	} else {
		r.prev.Aggregated++
	}
}

// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}.
func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point {
	return []{{$v.Name}}Point{*r.prev}
}

// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer.
type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point

// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates
// the passed in points and then invokes the function to reduce the points when they are emitted.
type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct {
	points []{{$k.Name}}Point
	fn     {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc
}

// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer.
func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer {
	return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn}
}

// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed
// to the reduce function when Emit is called.
func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	r.points = append(r.points, *p.Clone())
}

// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice.
// This is a more efficient version of calling Aggregate{{$k.Name}} on each point.
func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) {
	r.points = append(r.points, points...)
}

// Emit invokes the reduce function on the aggregated points to generate the aggregated points.
// This method does not clear the points from the internal slice.
func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point {
	return r.fn(r.points)
}
{{end}}
// {{$k.Name}}DistinctReducer returns the distinct points in a series.
type {{$k.Name}}DistinctReducer struct {
	m map[{{$k.Type}}]{{$k.Name}}Point // first point seen for each distinct value
}

// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer.
func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer {
	return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)}
}

// Aggregate{{$k.Name}} aggregates a point into the reducer.
// Only the first point observed for a given value is retained; duplicates are dropped.
func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	if _, ok := r.m[p.Value]; !ok {
		r.m[p.Value] = *p
	}
}

// Emit emits the distinct points that have been aggregated into the reducer.
// Map iteration order is random, so the result is sorted (via the {{$k.name}}Points
// ordering) for deterministic output; only Time and Value are carried over.
func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point {
	points := make([]{{$k.Name}}Point, 0, len(r.m))
	for _, p := range r.m {
		points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value})
	}
	sort.Sort({{$k.name}}Points(points))
	return points
}
// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points.
type {{$k.Name}}ElapsedReducer struct {
	unitConversion int64 // divisor converting the raw time delta into the requested unit
	prev {{$k.Name}}Point // previous point; Nil until at least two points have been seen
	curr {{$k.Name}}Point // most recent point
}

// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer.
func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer {
	return &{{$k.Name}}ElapsedReducer{
		unitConversion: int64(interval.Duration),
		prev:           {{$k.Name}}Point{Nil: true},
		curr:           {{$k.Name}}Point{Nil: true},
	}
}

// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window.
func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	r.prev = r.curr
	r.curr = *p
}

// Emit emits the elapsed of the reducer at the current point.
// The result is always an IntegerPoint (elapsed is a duration count), regardless
// of the input point type. Nothing is emitted until two points have arrived.
func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint {
	if !r.prev.Nil {
		elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion
		return []IntegerPoint{
			{Time: r.curr.Time, Value: elapsed},
		}
	}
	return nil
}
// {{$k.Name}}SampleReducer implements a reservoir sampling to calculate a random subset of points
type {{$k.Name}}SampleReducer struct {
	count int // how many points we've iterated over
	rng *rand.Rand // random number generator for each reducer
	points {{$k.name}}Points // the reservoir
}

// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer
func New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer {
	return &{{$k.Name}}SampleReducer{
		rng:    rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/
		points: make({{$k.name}}Points, size),
	}
}

// Aggregate{{$k.Name}} aggregates a point into the reducer.
// This is Algorithm R-style reservoir sampling: the first len(points) points fill
// the reservoir, after which each new point replaces a random slot with
// probability len(points)/count.
func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {
	r.count++
	// Fill the reservoir with the first n points
	if r.count-1 < len(r.points) {
		p.CopyTo(&r.points[r.count-1])
		return
	}
	// Generate a random integer in the half-open range [0, count)
	// (rand.Intn(n) returns 0..n-1) and, if that index falls inside the
	// reservoir, replace the point at that index with p.
	rnd := r.rng.Intn(r.count)
	if rnd < len(r.points) {
		p.CopyTo(&r.points[rnd])
	}
}

// Emit emits the reservoir sample as many points.
// If fewer than len(points) points were aggregated, only the filled prefix is
// returned; the sample is sorted for deterministic ordering.
func (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point {
	min := len(r.points)
	if r.count < min {
		min = r.count
	}
	pts := r.points[:min]
	sort.Sort(pts)
	return pts
}
{{end}}{{end}}

2152
influxql/query/functions.go Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,499 @@
package query_test
import (
"math"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/pkg/deep"
"github.com/influxdata/influxql"
)
// almostEqual reports whether got lies within 1e-5 of exp.
// A NaN got value never matches anything (including NaN).
func almostEqual(got, exp float64) bool {
	if math.IsNaN(got) {
		return false
	}
	return math.Abs(got-exp) < 1e-5
}
// TestHoltWinters_AusTourists checks the seasonal Holt-Winters forecast against
// golden values for the quarterly Australian tourists dataset.
// Reducer arguments are presumably (forecast 10 points, season length 4, no
// fitted data, unit interval) — confirm against NewFloatHoltWintersReducer.
func TestHoltWinters_AusTourists(t *testing.T) {
	hw := query.NewFloatHoltWintersReducer(10, 4, false, 1)
	// Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists
	austourists := []query.FloatPoint{
		{Time: 1, Value: 30.052513},
		{Time: 2, Value: 19.148496},
		{Time: 3, Value: 25.317692},
		{Time: 4, Value: 27.591437},
		{Time: 5, Value: 32.076456},
		{Time: 6, Value: 23.487961},
		{Time: 7, Value: 28.47594},
		{Time: 8, Value: 35.123753},
		{Time: 9, Value: 36.838485},
		{Time: 10, Value: 25.007017},
		{Time: 11, Value: 30.72223},
		{Time: 12, Value: 28.693759},
		{Time: 13, Value: 36.640986},
		{Time: 14, Value: 23.824609},
		{Time: 15, Value: 29.311683},
		{Time: 16, Value: 31.770309},
		{Time: 17, Value: 35.177877},
		{Time: 18, Value: 19.775244},
		{Time: 19, Value: 29.60175},
		{Time: 20, Value: 34.538842},
		{Time: 21, Value: 41.273599},
		{Time: 22, Value: 26.655862},
		{Time: 23, Value: 28.279859},
		{Time: 24, Value: 35.191153},
		{Time: 25, Value: 41.727458},
		{Time: 26, Value: 24.04185},
		{Time: 27, Value: 32.328103},
		{Time: 28, Value: 37.328708},
		{Time: 29, Value: 46.213153},
		{Time: 30, Value: 29.346326},
		{Time: 31, Value: 36.48291},
		{Time: 32, Value: 42.977719},
		{Time: 33, Value: 48.901525},
		{Time: 34, Value: 31.180221},
		{Time: 35, Value: 37.717881},
		{Time: 36, Value: 40.420211},
		{Time: 37, Value: 51.206863},
		{Time: 38, Value: 31.887228},
		{Time: 39, Value: 40.978263},
		{Time: 40, Value: 43.772491},
		{Time: 41, Value: 55.558567},
		{Time: 42, Value: 33.850915},
		{Time: 43, Value: 42.076383},
		{Time: 44, Value: 45.642292},
		{Time: 45, Value: 59.76678},
		{Time: 46, Value: 35.191877},
		{Time: 47, Value: 44.319737},
		{Time: 48, Value: 47.913736},
	}
	// NOTE(review): &p aliases the loop variable; safe only because
	// AggregateFloat consumes the point synchronously — confirm it copies.
	for _, p := range austourists {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()
	// Golden forecast values for the 10 requested steps beyond Time 48.
	forecasted := []query.FloatPoint{
		{Time: 49, Value: 51.85064132137853},
		{Time: 50, Value: 43.26055282315273},
		{Time: 51, Value: 41.827258044814464},
		{Time: 52, Value: 54.3990354591749},
		{Time: 53, Value: 54.62334472770803},
		{Time: 54, Value: 45.57155693625209},
		{Time: 55, Value: 44.06051240252263},
		{Time: 56, Value: 57.30029870759433},
		{Time: 57, Value: 57.53591513519172},
		{Time: 58, Value: 47.999008139396096},
	}
	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}
// TestHoltWinters_AusTourists_Missing is the same dataset as
// TestHoltWinters_AusTourists but with several timestamps removed, verifying
// the reducer still produces a (different) stable forecast with gaps present.
func TestHoltWinters_AusTourists_Missing(t *testing.T) {
	hw := query.NewFloatHoltWintersReducer(10, 4, false, 1)
	// Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists
	austourists := []query.FloatPoint{
		{Time: 1, Value: 30.052513},
		{Time: 3, Value: 25.317692},
		{Time: 4, Value: 27.591437},
		{Time: 5, Value: 32.076456},
		{Time: 6, Value: 23.487961},
		{Time: 7, Value: 28.47594},
		{Time: 9, Value: 36.838485},
		{Time: 10, Value: 25.007017},
		{Time: 11, Value: 30.72223},
		{Time: 12, Value: 28.693759},
		{Time: 13, Value: 36.640986},
		{Time: 14, Value: 23.824609},
		{Time: 15, Value: 29.311683},
		{Time: 16, Value: 31.770309},
		{Time: 17, Value: 35.177877},
		{Time: 19, Value: 29.60175},
		{Time: 20, Value: 34.538842},
		{Time: 21, Value: 41.273599},
		{Time: 22, Value: 26.655862},
		{Time: 23, Value: 28.279859},
		{Time: 24, Value: 35.191153},
		{Time: 25, Value: 41.727458},
		{Time: 26, Value: 24.04185},
		{Time: 27, Value: 32.328103},
		{Time: 28, Value: 37.328708},
		{Time: 30, Value: 29.346326},
		{Time: 31, Value: 36.48291},
		{Time: 32, Value: 42.977719},
		{Time: 34, Value: 31.180221},
		{Time: 35, Value: 37.717881},
		{Time: 36, Value: 40.420211},
		{Time: 37, Value: 51.206863},
		{Time: 38, Value: 31.887228},
		{Time: 41, Value: 55.558567},
		{Time: 42, Value: 33.850915},
		{Time: 43, Value: 42.076383},
		{Time: 44, Value: 45.642292},
		{Time: 45, Value: 59.76678},
		{Time: 46, Value: 35.191877},
		{Time: 47, Value: 44.319737},
		{Time: 48, Value: 47.913736},
	}
	for _, p := range austourists {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()
	// Golden forecast values for this gapped input.
	forecasted := []query.FloatPoint{
		{Time: 49, Value: 54.84533610387743},
		{Time: 50, Value: 41.19329421863249},
		{Time: 51, Value: 45.71673175112451},
		{Time: 52, Value: 56.05759298805955},
		{Time: 53, Value: 59.32337460282217},
		{Time: 54, Value: 44.75280096850461},
		{Time: 55, Value: 49.98865098113751},
		{Time: 56, Value: 61.86084934967605},
		{Time: 57, Value: 65.95805633454883},
		{Time: 58, Value: 50.1502170480547},
	}
	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}
// TestHoltWinters_USPopulation checks a non-seasonal (season length 0) fit
// with fitted data included: the emitted points cover the input range plus
// the 10 forecast steps, all compared against golden values.
func TestHoltWinters_USPopulation(t *testing.T) {
	series := []query.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 5.31},
		{Time: 3, Value: 7.24},
		{Time: 4, Value: 9.64},
		{Time: 5, Value: 12.90},
		{Time: 6, Value: 17.10},
		{Time: 7, Value: 23.20},
		{Time: 8, Value: 31.40},
		{Time: 9, Value: 39.80},
		{Time: 10, Value: 50.20},
		{Time: 11, Value: 62.90},
		{Time: 12, Value: 76.00},
		{Time: 13, Value: 92.00},
		{Time: 14, Value: 105.70},
		{Time: 15, Value: 122.80},
		{Time: 16, Value: 131.70},
		{Time: 17, Value: 151.30},
		{Time: 18, Value: 179.30},
		{Time: 19, Value: 203.20},
	}
	hw := query.NewFloatHoltWintersReducer(10, 0, true, 1)
	for _, p := range series {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()
	// Golden values: fitted points for Times 1-19, then the 10-step forecast.
	forecasted := []query.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 4.957405463559748},
		{Time: 3, Value: 7.012210102535647},
		{Time: 4, Value: 10.099589257439924},
		{Time: 5, Value: 14.229926188104242},
		{Time: 6, Value: 19.418878968703797},
		{Time: 7, Value: 25.68749172281409},
		{Time: 8, Value: 33.062351305731305},
		{Time: 9, Value: 41.575791076125206},
		{Time: 10, Value: 51.26614395589263},
		{Time: 11, Value: 62.178047564264595},
		{Time: 12, Value: 74.36280483872488},
		{Time: 13, Value: 87.87880423073163},
		{Time: 14, Value: 102.79200429905801},
		{Time: 15, Value: 119.17648832929542},
		{Time: 16, Value: 137.11509549747296},
		{Time: 17, Value: 156.70013608313175},
		{Time: 18, Value: 178.03419933863566},
		{Time: 19, Value: 201.23106385518594},
		{Time: 20, Value: 226.4167216525905},
		{Time: 21, Value: 253.73052878285205},
		{Time: 22, Value: 283.32649700397553},
		{Time: 23, Value: 315.37474308085984},
		{Time: 24, Value: 350.06311454009256},
		{Time: 25, Value: 387.59901328556873},
		{Time: 26, Value: 428.21144141893404},
		{Time: 27, Value: 472.1532969569147},
		{Time: 28, Value: 519.7039509590035},
		{Time: 29, Value: 571.1721419458248},
	}
	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}
// TestHoltWinters_USPopulation_Missing repeats the US population fit with
// several samples removed; note the emitted fitted points still cover the
// full Time range 1-19 (missing inputs are interpolated in the output).
func TestHoltWinters_USPopulation_Missing(t *testing.T) {
	series := []query.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 5.31},
		{Time: 3, Value: 7.24},
		{Time: 4, Value: 9.64},
		{Time: 5, Value: 12.90},
		{Time: 6, Value: 17.10},
		{Time: 7, Value: 23.20},
		{Time: 8, Value: 31.40},
		{Time: 10, Value: 50.20},
		{Time: 11, Value: 62.90},
		{Time: 12, Value: 76.00},
		{Time: 13, Value: 92.00},
		{Time: 15, Value: 122.80},
		{Time: 16, Value: 131.70},
		{Time: 17, Value: 151.30},
		{Time: 19, Value: 203.20},
	}
	hw := query.NewFloatHoltWintersReducer(10, 0, true, 1)
	for _, p := range series {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()
	// Golden values: fitted points for Times 1-19, then the 10-step forecast.
	forecasted := []query.FloatPoint{
		{Time: 1, Value: 3.93},
		{Time: 2, Value: 4.8931364428135105},
		{Time: 3, Value: 6.962653629047061},
		{Time: 4, Value: 10.056207765903274},
		{Time: 5, Value: 14.18435088129532},
		{Time: 6, Value: 19.362939306110846},
		{Time: 7, Value: 25.613247940326584},
		{Time: 8, Value: 32.96213087008264},
		{Time: 9, Value: 41.442230043017204},
		{Time: 10, Value: 51.09223428526052},
		{Time: 11, Value: 61.95719155158485},
		{Time: 12, Value: 74.08887794968567},
		{Time: 13, Value: 87.54622778052787},
		{Time: 14, Value: 102.39582960014131},
		{Time: 15, Value: 118.7124941463221},
		{Time: 16, Value: 136.57990089987464},
		{Time: 17, Value: 156.09133107941278},
		{Time: 18, Value: 177.35049601833734},
		{Time: 19, Value: 200.472471161683},
		{Time: 20, Value: 225.58474737097785},
		{Time: 21, Value: 252.82841286206823},
		{Time: 22, Value: 282.35948095261017},
		{Time: 23, Value: 314.3503808953992},
		{Time: 24, Value: 348.99163145856954},
		{Time: 25, Value: 386.49371962730555},
		{Time: 26, Value: 427.08920989407727},
		{Time: 27, Value: 471.0351131332573},
		{Time: 28, Value: 518.615548088049},
		{Time: 29, Value: 570.1447331101863},
	}
	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}
// TestHoltWinters_RoundTime verifies that input timestamps are rounded to the
// supplied interval (one second here): inputs carry sub-second offsets but the
// emitted points land exactly on second boundaries.
func TestHoltWinters_RoundTime(t *testing.T) {
	// Anchor near influxql.MaxTime to also exercise behavior at the upper
	// end of the valid time range.
	maxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano()
	data := []query.FloatPoint{
		{Time: maxTime - int64(5*time.Second), Value: 1},
		{Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10},
		{Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2},
		{Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11},
	}
	hw := query.NewFloatHoltWintersReducer(2, 2, true, time.Second)
	for _, p := range data {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()
	// Golden values: four fitted points on rounded timestamps plus the
	// two-step forecast.
	forecasted := []query.FloatPoint{
		{Time: maxTime - int64(5*time.Second), Value: 1},
		{Time: maxTime - int64(4*time.Second), Value: 10.006729104838234},
		{Time: maxTime - int64(3*time.Second), Value: 1.998341814469269},
		{Time: maxTime - int64(2*time.Second), Value: 10.997858830631172},
		{Time: maxTime - int64(1*time.Second), Value: 4.085860238030013},
		{Time: maxTime - int64(0*time.Second), Value: 11.35713604403339},
	}
	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}
// TestHoltWinters_MaxTime feeds points at the very top of the valid time range.
// NOTE(review): the expected forecast point sits at influxql.MaxTime + 1, i.e.
// the reducer is allowed to emit one step past MaxTime — confirm this is the
// intended contract rather than an overflow hazard.
func TestHoltWinters_MaxTime(t *testing.T) {
	data := []query.FloatPoint{
		{Time: influxql.MaxTime - 1, Value: 1},
		{Time: influxql.MaxTime, Value: 2},
	}
	hw := query.NewFloatHoltWintersReducer(1, 0, true, 1)
	for _, p := range data {
		hw.AggregateFloat(&p)
	}
	points := hw.Emit()
	forecasted := []query.FloatPoint{
		{Time: influxql.MaxTime - 1, Value: 1},
		{Time: influxql.MaxTime, Value: 2.001516944066403},
		{Time: influxql.MaxTime + 1, Value: 2.5365248972488343},
	}
	if exp, got := len(forecasted), len(points); exp != got {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	for i := range forecasted {
		if exp, got := forecasted[i].Time, points[i].Time; got != exp {
			t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp)
		}
		if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {
			t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp)
		}
	}
}
// TestSample_AllSamplesSeen attempts to verify that it is possible
// to get every subsample in a reasonable number of iterations.
//
// The idea here is that 30 iterations should be enough to hit every possible
// sequence at least once. The test is probabilistic but the failure odds with
// 30 independent draws over 3 equally likely subsamples are negligible.
func TestSample_AllSamplesSeen(t *testing.T) {
	ps := []query.FloatPoint{
		{Time: 1, Value: 1},
		{Time: 2, Value: 2},
		{Time: 3, Value: 3},
	}
	// List of all the possible subsamples
	samples := [][]query.FloatPoint{
		{
			{Time: 1, Value: 1},
			{Time: 2, Value: 2},
		},
		{
			{Time: 1, Value: 1},
			{Time: 3, Value: 3},
		},
		{
			{Time: 2, Value: 2},
			{Time: 3, Value: 3},
		},
	}
	// 30 iterations should be sufficient to guarantee that
	// we hit every possible subsample.
	for i := 0; i < 30; i++ {
		s := query.NewFloatSampleReducer(2)
		for _, p := range ps {
			s.AggregateFloat(&p)
		}
		points := s.Emit()
		for i, sample := range samples {
			// if we find a sample that it matches, remove it from
			// this list of possible samples
			// (mutating samples inside the range is safe because we
			// break out immediately after the removal)
			if deep.Equal(sample, points) {
				samples = append(samples[:i], samples[i+1:]...)
				break
			}
		}
		// if samples is empty we've seen every sample, so we're done
		if len(samples) == 0 {
			return
		}
		// The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep,
		// this test will fail on machines where UnixNano doesn't return full resolution.
		// Specifically, some Windows machines will only return timestamps accurate to 100ns.
		// While iterating through this test without an explicit sleep,
		// we would only see one or two unique seeds across all the calls to NewFloatSampleReducer.
		time.Sleep(time.Millisecond)
	}
	// If we missed a sample, report the error
	if len(samples) != 0 {
		t.Fatalf("expected all samples to be seen; unseen samples: %#v", samples)
	}
}
// TestSample_SampleSizeLessThanNumPoints verifies that a reservoir smaller
// than the input emits exactly its configured size.
func TestSample_SampleSizeLessThanNumPoints(t *testing.T) {
	r := query.NewFloatSampleReducer(2)
	input := []query.FloatPoint{
		{Time: 1, Value: 1},
		{Time: 2, Value: 2},
		{Time: 3, Value: 3},
	}
	for i := range input {
		r.AggregateFloat(&input[i])
	}
	if got, exp := len(r.Emit()), 2; got != exp {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
}
// TestSample_SampleSizeGreaterThanNumPoints verifies that a reservoir larger
// than the input returns every point unchanged and in order.
func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) {
	r := query.NewFloatSampleReducer(4)
	input := []query.FloatPoint{
		{Time: 1, Value: 1},
		{Time: 2, Value: 2},
		{Time: 3, Value: 3},
	}
	for i := range input {
		r.AggregateFloat(&input[i])
	}
	points := r.Emit()
	if got, exp := len(points), len(input); got != exp {
		t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp)
	}
	if !deep.Equal(input, points) {
		t.Fatalf("unexpected points: %s", spew.Sdump(points))
	}
}

View File

@ -0,0 +1,3 @@
This is a port of [gota](https://github.com/phemmer/gota) adapted for use inside of InfluxDB.
This port was made with the permission of the author, Patrick Hemmer, and has been modified to remove dependencies that are not part of InfluxDB.

View File

@ -0,0 +1,127 @@
package gota
// CMO - Chande Momentum Oscillator (https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo)
//
// A ring buffer holds the most recent inTimePeriod-1 prices together with
// their step-to-step differences, while running sums of the upward and
// downward moves are maintained incrementally.
type CMO struct {
	points  []cmoPoint // ring buffer of recent prices and their diffs
	sumUp   float64    // running sum of positive diffs in the window
	sumDown float64    // running sum (stored positive) of negative diffs
	count   int        // samples seen; stops counting once warmed
	idx     int        // index of newest point
}

// cmoPoint pairs a price with the diff from the price that preceded it.
type cmoPoint struct {
	price float64
	diff  float64
}

// NewCMO constructs a new CMO.
func NewCMO(inTimePeriod int) *CMO {
	return &CMO{
		points: make([]cmoPoint, inTimePeriod-1),
	}
}

// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed".
func (cmo *CMO) WarmCount() int {
	return len(cmo.points)
}

// Add adds a new sample value to the algorithm and returns the computed value.
func (cmo *CMO) Add(v float64) float64 {
	// The slot after the newest point holds the oldest point; it is about
	// to be recycled for v.
	slot := cmo.idx + 1
	if slot == len(cmo.points) {
		slot = 0
	}

	// Fold the newest diff into the running sums. The very first sample has
	// no predecessor, so it contributes no diff.
	var delta float64
	if cmo.count != 0 {
		delta = v - cmo.points[cmo.idx].price
		switch {
		case delta > 0:
			cmo.sumUp += delta
		case delta < 0:
			cmo.sumDown -= delta
		}
	}

	var result float64
	if cmo.sumUp != 0 || cmo.sumDown != 0 {
		result = 100.0 * ((cmo.sumUp - cmo.sumDown) / (cmo.sumUp + cmo.sumDown))
	}

	// Retire the evicted point's diff from the running sums.
	// NOTE: because we only add and subtract differences rather than
	// recomputing sumUp/sumDown from cmo.points[].price, floating-point
	// imprecision can creep in over time. If that ever matters, the sums
	// could be recalculated from scratch every N points.
	evicted := cmo.points[slot]
	switch {
	case evicted.diff > 0:
		cmo.sumUp -= evicted.diff
	case evicted.diff < 0:
		cmo.sumDown += evicted.diff
	}

	cmo.points[slot] = cmoPoint{price: v, diff: delta}
	cmo.idx = slot
	if !cmo.Warmed() {
		cmo.count++
	}
	return result
}

// Warmed indicates whether the algorithm has enough data to generate accurate results.
// The +2 accounts for the first sample (no diff) plus one full window of diffs.
func (cmo *CMO) Warmed() bool {
	return cmo.count == len(cmo.points)+2
}
// CMOS is a smoothed version of the Chande Momentum Oscillator.
// This is the version of CMO utilized by ta-lib: the up and down moves are
// each run through an EMA before the oscillator ratio is formed.
type CMOS struct {
	emaUp   EMA // smoothed average of upward moves
	emaDown EMA // smoothed average of downward moves
	lastV   float64
}

// NewCMOS constructs a new CMOS.
func NewCMOS(inTimePeriod int, warmType WarmupType) *CMOS {
	// ta-lib smooths with alpha = 1/period instead of the usual 2/(period+1).
	ema := NewEMA(inTimePeriod+1, warmType)
	ema.alpha = float64(1) / float64(inTimePeriod)
	return &CMOS{
		emaUp:   *ema, // value copies: the two averages evolve independently
		emaDown: *ema,
	}
}

// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed".
func (cmos CMOS) WarmCount() int {
	return cmos.emaUp.WarmCount()
}

// Warmed indicates whether the algorithm has enough data to generate accurate results.
func (cmos CMOS) Warmed() bool {
	return cmos.emaUp.Warmed()
}

// Last returns the last output value.
func (cmos CMOS) Last() float64 {
	up := cmos.emaUp.Last()
	down := cmos.emaDown.Last()
	return 100.0 * ((up - down) / (up + down))
}

// Add adds a new sample value to the algorithm and returns the computed value.
func (cmos *CMOS) Add(v float64) float64 {
	var up, down float64
	switch {
	case v > cmos.lastV:
		up = v - cmos.lastV
	case v < cmos.lastV:
		down = cmos.lastV - v
	}
	cmos.emaUp.Add(up)
	cmos.emaDown.Add(down)
	cmos.lastV = v
	return cmos.Last()
}

View File

@ -0,0 +1,41 @@
package gota
import "testing"
// TestCMO runs a 1..15..1 triangle wave through a period-10 CMO and compares
// against golden values (the pure up-ramp saturates at +100, the down-ramp
// at -100).
func TestCMO(t *testing.T) {
	input := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	want := []float64{100, 100, 100, 100, 100, 80, 60, 40, 20, 0, -20, -40, -60, -80, -100, -100, -100, -100, -100}
	cmo := NewCMO(10)
	var got []float64
	for _, v := range input {
		out := cmo.Add(v)
		if cmo.Warmed() {
			got = append(got, out)
		}
	}
	if diff := diffFloats(want, got, 1e-7); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}
// TestCMOS checks the smoothed (ta-lib compatible) CMO variant against golden
// values produced by ta-lib itself.
func TestCMOS(t *testing.T) {
	list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// expList is generated by the following code:
	// expList, _ := talib.Cmo(list, 10, nil)
	expList := []float64{100, 100, 100, 100, 100, 80, 61.999999999999986, 45.79999999999999, 31.22, 18.097999999999992, 6.288199999999988, -4.340620000000012, -13.906558000000008, -22.515902200000014, -30.264311980000013, -37.23788078200001, -43.51409270380002, -49.16268343342002, -54.24641509007802}
	cmo := NewCMOS(10, WarmSMA)
	var actList []float64
	for _, v := range list {
		if vOut := cmo.Add(v); cmo.Warmed() {
			actList = append(actList, vOut)
		}
	}
	if diff := diffFloats(expList, actList, 1e-7); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}

View File

@ -0,0 +1,188 @@
package gota
import (
"fmt"
)
// AlgSimple is the interface shared by the single-input moving-average style
// algorithms in this package: samples go in via Add, warmup state is queried
// via Warmed/WarmCount.
type AlgSimple interface {
	Add(float64) float64
	Warmed() bool
	WarmCount() int
}

// WarmupType selects how an algorithm behaves while it is still warming up
// (before a full period of samples has been seen).
type WarmupType int8

const (
	WarmEMA WarmupType = iota // Exponential Moving Average
	WarmSMA                   // Simple Moving Average
)

// ParseWarmupType converts the string representation ("exponential" or
// "simple") into the corresponding WarmupType.
func ParseWarmupType(wt string) (WarmupType, error) {
	if wt == "exponential" {
		return WarmEMA, nil
	}
	if wt == "simple" {
		return WarmSMA, nil
	}
	return 0, fmt.Errorf("invalid warmup type '%s'", wt)
}

// EMA - Exponential Moving Average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages#exponential_moving_average_calculation)
type EMA struct {
	inTimePeriod int
	last         float64
	count        int
	alpha        float64 // smoothing constant, 2/(period+1) by default
	warmType     WarmupType
}

// NewEMA constructs a new EMA.
//
// When warmed with WarmSMA the first inTimePeriod samples produce a simple
// average, switching to an exponential moving average once warmup completes.
//
// When warmed with WarmEMA an exponential moving average is used immediately;
// during warmup alpha is scaled to prevent unbalanced weighting of the
// initial values.
func NewEMA(inTimePeriod int, warmType WarmupType) *EMA {
	return &EMA{
		inTimePeriod: inTimePeriod,
		alpha:        2 / float64(inTimePeriod+1),
		warmType:     warmType,
	}
}

// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed".
func (ema *EMA) WarmCount() int {
	return ema.inTimePeriod - 1
}

// Warmed indicates whether the algorithm has enough data to generate accurate results.
func (ema *EMA) Warmed() bool {
	return ema.count == ema.inTimePeriod
}

// Last returns the last output value.
func (ema *EMA) Last() float64 {
	return ema.last
}

// Add adds a new sample value to the algorithm and returns the computed value.
func (ema *EMA) Add(v float64) float64 {
	var next float64
	switch {
	case ema.count == 0:
		// First sample: the average is the sample itself.
		next = v
	case ema.Warmed():
		prev := ema.Last()
		next = (v-prev)*ema.alpha + prev
	case ema.warmType == WarmSMA:
		// Simple running average during warmup.
		prev := ema.Last()
		next = (prev*float64(ema.count) + v) / float64(ema.count+1)
	default: // WarmEMA
		// Scale alpha so early samples are not excessively weighted.
		prev := ema.Last()
		scaled := 2 / float64(ema.count+2)
		next = (v-prev)*scaled + prev
	}
	ema.last = next
	if ema.count < ema.inTimePeriod {
		// Stop counting once warmed to prevent potential overflow.
		ema.count++
	}
	return next
}
// DEMA - Double Exponential Moving Average (https://en.wikipedia.org/wiki/Double_exponential_moving_average)
//
// The output of one EMA is fed through a second EMA and the two are combined
// as 2*ema1 - ema2 to reduce lag.
type DEMA struct {
	ema1 EMA
	ema2 EMA
}

// NewDEMA constructs a new DEMA.
//
// When warmed with WarmSMA the first inTimePeriod samples produce a simple
// average, switching to an exponential moving average once warmup completes.
//
// When warmed with WarmEMA an exponential moving average is used immediately;
// during warmup alpha is scaled to prevent unbalanced weighting of the
// initial values.
func NewDEMA(inTimePeriod int, warmType WarmupType) *DEMA {
	return &DEMA{
		ema1: *NewEMA(inTimePeriod, warmType),
		ema2: *NewEMA(inTimePeriod, warmType),
	}
}

// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed".
func (dema *DEMA) WarmCount() int {
	wc := dema.ema1.WarmCount()
	if dema.ema1.warmType == WarmEMA {
		// With WarmEMA both stages warm concurrently.
		return wc
	}
	// With WarmSMA the second stage only starts once the first is warm.
	return wc + dema.ema2.WarmCount()
}

// Add adds a new sample value to the algorithm and returns the computed value.
func (dema *DEMA) Add(v float64) float64 {
	avg1 := dema.ema1.Add(v)
	avg2 := avg1
	if dema.ema1.Warmed() || dema.ema1.warmType == WarmEMA {
		avg2 = dema.ema2.Add(avg1)
	}
	return 2*avg1 - avg2
}

// Warmed indicates whether the algorithm has enough data to generate accurate results.
func (dema *DEMA) Warmed() bool {
	return dema.ema2.Warmed()
}
// TEMA - Triple Exponential Moving Average (https://en.wikipedia.org/wiki/Triple_exponential_moving_average)
//
// Three cascaded EMAs are combined as 3*ema1 - 3*ema2 + ema3 to further
// reduce lag.
type TEMA struct {
	ema1 EMA
	ema2 EMA
	ema3 EMA
}

// NewTEMA constructs a new TEMA.
//
// When warmed with WarmSMA the first inTimePeriod samples produce a simple
// average, switching to an exponential moving average once warmup completes.
//
// When warmed with WarmEMA an exponential moving average is used immediately;
// during warmup alpha is scaled to prevent unbalanced weighting of the
// initial values.
func NewTEMA(inTimePeriod int, warmType WarmupType) *TEMA {
	return &TEMA{
		ema1: *NewEMA(inTimePeriod, warmType),
		ema2: *NewEMA(inTimePeriod, warmType),
		ema3: *NewEMA(inTimePeriod, warmType),
	}
}

// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed".
func (tema *TEMA) WarmCount() int {
	wc := tema.ema1.WarmCount()
	if tema.ema1.warmType == WarmEMA {
		// With WarmEMA all stages warm concurrently.
		return wc
	}
	// With WarmSMA each stage only starts once the previous one is warm.
	return wc + tema.ema2.WarmCount() + tema.ema3.WarmCount()
}

// Add adds a new sample value to the algorithm and returns the computed value.
func (tema *TEMA) Add(v float64) float64 {
	avg1 := tema.ema1.Add(v)
	avg2 := avg1
	if tema.ema1.Warmed() || tema.ema1.warmType == WarmEMA {
		avg2 = tema.ema2.Add(avg1)
	}
	avg3 := avg2
	if tema.ema2.Warmed() || tema.ema2.warmType == WarmEMA {
		avg3 = tema.ema3.Add(avg2)
	}
	return 3*avg1 - 3*avg2 + avg3
}

// Warmed indicates whether the algorithm has enough data to generate accurate results.
func (tema *TEMA) Warmed() bool {
	return tema.ema3.Warmed()
}

View File

@ -0,0 +1,114 @@
package gota
import "testing"
// TestEMA checks the WarmSMA-seeded EMA against golden values produced by
// ta-lib for a 1..15..1 triangle wave with period 10.
func TestEMA(t *testing.T) {
	list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// expList is generated by the following code:
	// expList, _ := talib.Ema(list, 10, nil)
	expList := []float64{5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.136363636363637, 11.475206611570249, 11.570623591284749, 11.466873847414794, 11.200169511521196, 10.800138691244614, 10.291022565563775, 9.692654826370362, 9.021263039757569, 8.290124305256192, 7.510101704300521, 6.690083212609517, 5.837340810316878, 4.957824299350173}
	ema := NewEMA(10, WarmSMA)
	var actList []float64
	for _, v := range list {
		if vOut := ema.Add(v); ema.Warmed() {
			actList = append(actList, vOut)
		}
	}
	if diff := diffFloats(expList, actList, 0.0000001); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}
// TestDEMA checks the WarmSMA-seeded DEMA against golden values produced by
// ta-lib (note the longer warmup: both EMA stages must warm in sequence, so
// fewer output values are compared than in TestEMA).
func TestDEMA(t *testing.T) {
	list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// expList is generated by the following code:
	// expList, _ := talib.Dema(list, 10, nil)
	expList := []float64{13.568840926166246, 12.701748119313985, 11.701405062848783, 10.611872766773773, 9.465595022565749, 8.28616628396151, 7.090477085921927, 5.8903718513360275, 4.693925476073202, 3.5064225149113692, 2.331104912318361}
	dema := NewDEMA(10, WarmSMA)
	var actList []float64
	for _, v := range list {
		if vOut := dema.Add(v); dema.Warmed() {
			actList = append(actList, vOut)
		}
	}
	if diff := diffFloats(expList, actList, 0.0000001); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}
// TestTEMA checks the WarmSMA-seeded TEMA (period 4) against golden values
// produced by ta-lib.
func TestTEMA(t *testing.T) {
	list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// expList is generated by the following code:
	// expList, _ := talib.Tema(list, 4, nil)
	expList := []float64{10, 11, 12, 13, 14, 15, 14.431999999999995, 13.345600000000001, 12.155520000000001, 11, 9.906687999999997, 8.86563072, 7.8589122560000035, 6.871005491200005, 5.891160883200005, 4.912928706560004, 3.932955104051203, 2.9498469349785603, 1.9633255712030717, 0.9736696408637435}
	tema := NewTEMA(4, WarmSMA)
	var actList []float64
	for _, v := range list {
		if vOut := tema.Add(v); tema.Warmed() {
			actList = append(actList, vOut)
		}
	}
	if diff := diffFloats(expList, actList, 0.0000001); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}
// TestEmaWarmCount verifies that WarmCount matches the number of samples
// actually required before Warmed first reports true.
func TestEmaWarmCount(t *testing.T) {
	const period = 9
	ema := NewEMA(period, WarmSMA)
	i := 0
	for ; i < period*10; i++ {
		ema.Add(float64(i))
		if ema.Warmed() {
			break
		}
	}
	if got, want := i, ema.WarmCount(); got != want {
		t.Errorf("unexpected warm count: got=%d want=%d", got, want)
	}
}
// TestDemaWarmCount verifies that DEMA's WarmCount matches the number of
// samples actually required before Warmed first reports true.
func TestDemaWarmCount(t *testing.T) {
	const period = 9
	dema := NewDEMA(period, WarmSMA)
	i := 0
	for ; i < period*10; i++ {
		dema.Add(float64(i))
		if dema.Warmed() {
			break
		}
	}
	if got, want := i, dema.WarmCount(); got != want {
		t.Errorf("unexpected warm count: got=%d want=%d", got, want)
	}
}
// TestTemaWarmCount verifies that TEMA's WarmCount matches the number of
// samples actually required before Warmed first reports true.
func TestTemaWarmCount(t *testing.T) {
	const period = 9
	tema := NewTEMA(period, WarmSMA)
	i := 0
	for ; i < period*10; i++ {
		tema.Add(float64(i))
		if tema.Warmed() {
			break
		}
	}
	if got, want := i, tema.WarmCount(); got != want {
		t.Errorf("unexpected warm count: got=%d want=%d", got, want)
	}
}

View File

@ -0,0 +1,113 @@
package gota
import (
"math"
)
// KER - Kaufman's Efficiency Ratio (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average#efficiency_ratio_er)
//
// The ratio compares the net price change over the window ("signal")
// against the sum of the individual step changes ("noise"); see Add.
type KER struct {
	points []kerPoint // ring buffer holding one window's worth of samples
	noise  float64    // running sum of the per-step absolute diffs in points
	count  int        // samples seen so far; stops increasing once warmed
	idx    int        // index of newest point
}
// kerPoint is one ring-buffer slot: a raw sample plus the absolute
// difference from the sample that preceded it, so the rolling noise sum
// can be maintained incrementally.
type kerPoint struct {
	price float64
	diff  float64
}
// NewKER constructs a new KER over a window of inTimePeriod samples.
func NewKER(inTimePeriod int) *KER {
	ker := &KER{}
	ker.points = make([]kerPoint, inTimePeriod)
	return ker
}
// WarmCount returns the number of samples that must be provided for the
// algorithm to be fully "warmed".
func (ker *KER) WarmCount() int {
	// The ring buffer holds exactly one window's worth of samples.
	return len(ker.points)
}
// Add adds a new sample value to the algorithm and returns the computed value.
//
// TODO: the return value is not meaningful until the algorithm is warmed.
func (ker *KER) Add(v float64) float64 {
	// The slot after the newest point holds the oldest point; it is the
	// one about to be overwritten.
	oldest := ker.idx + 1
	if oldest >= len(ker.points) {
		oldest = 0
	}

	// Signal: net movement across the whole window.
	signal := math.Abs(v - ker.points[oldest].price)

	// Noise: rolling sum of single-step movements — evict the oldest
	// step's contribution, then add the newest step.
	diff := math.Abs(v - ker.points[ker.idx].price)
	ker.noise -= ker.points[oldest].diff
	ker.noise += diff

	// Advance the ring: the oldest slot now holds the newest sample.
	ker.idx = oldest
	ker.points[oldest] = kerPoint{
		price: v,
		diff:  diff,
	}

	if !ker.Warmed() {
		ker.count++
	}

	if signal == 0 || ker.noise == 0 {
		return 0
	}
	return signal / ker.noise
}
// Warmed indicates whether the algorithm has enough data to generate accurate results.
//
// NOTE(review): warm-up completes after len(points)+1 calls to Add — one
// more than WarmCount reports; the existing warm-count tests codify this.
func (ker *KER) Warmed() bool {
	return ker.count == len(ker.points)+1
}
// KAMA - Kaufman's Adaptive Moving Average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average)
type KAMA struct {
	ker  KER     // efficiency-ratio tracker that drives the adaptive smoothing
	last float64 // most recent KAMA output
}
// NewKAMA constructs a new KAMA whose efficiency ratio is computed over
// a window of inTimePeriod samples.
func NewKAMA(inTimePeriod int) *KAMA {
	return &KAMA{
		ker: *NewKER(inTimePeriod),
	}
}
// WarmCount returns the number of samples that must be provided for the
// algorithm to be fully "warmed". It delegates to the underlying KER.
func (kama *KAMA) WarmCount() int {
	return kama.ker.WarmCount()
}
// Add adds a new sample value to the algorithm and returns the computed value.
func (kama *KAMA) Add(v float64) float64 {
	if !kama.Warmed() {
		// While warming up, re-seed the average with the most recent raw
		// sample (an SMA-based initialization was considered in an earlier
		// revision but is not used).
		kama.last = kama.ker.points[kama.ker.idx].price
	}

	// Smoothing constants for the fastest (2-period) and slowest
	// (30-period) EMAs in Kaufman's formulation. These are untyped
	// constants, so they fold to the exact same values as the original
	// inline expressions.
	const (
		fastest = 2.0 / (2.0 + 1.0)
		slowest = 2.0 / (30.0 + 1.0)
	)

	// Scale the smoothing constant by the efficiency ratio and square it.
	er := kama.ker.Add(v)
	sc := math.Pow(er*(fastest-slowest)+slowest, 2)

	kama.last = kama.last + sc*(v-kama.last)
	return kama.last
}
// Warmed indicates whether the algorithm has enough data to generate
// accurate results. It delegates to the underlying KER.
func (kama *KAMA) Warmed() bool {
	return kama.ker.Warmed()
}

View File

@ -0,0 +1,70 @@
package gota
import "testing"
// TestKER checks the efficiency ratio over a short hand-computed series.
func TestKER(t *testing.T) {
	input := []float64{20, 21, 22, 23, 22, 21}
	want := []float64{1, 1.0 / 3, 1.0 / 3}

	ker := NewKER(3)
	var got []float64
	for _, v := range input {
		out := ker.Add(v)
		if ker.Warmed() {
			got = append(got, out)
		}
	}

	if diff := diffFloats(want, got, 0.0000001); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}
// TestKAMA checks KAMA output for a ramp-up/ramp-down input series
// against reference values.
func TestKAMA(t *testing.T) {
	input := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// Expected values generated with the reference talib implementation.
	// NOTE(review): the original comment cited `talib.Cmo(list, 10, nil)`,
	// which looks like a copy-paste slip for talib.Kama — TODO confirm.
	want := []float64{10.444444444444445, 11.135802469135802, 11.964334705075446, 12.869074836153025, 13.81615268675168, 13.871008014588556, 13.71308456353558, 13.553331356741122, 13.46599437575161, 13.4515677602438, 13.29930139347417, 12.805116570729284, 11.752584300922967, 10.036160535131103, 7.797866963961725, 6.109926091089847, 4.727736717272138, 3.5154092873734104, 2.3974496040963396}

	kama := NewKAMA(10)
	var got []float64
	for _, v := range input {
		out := kama.Add(v)
		if kama.Warmed() {
			got = append(got, out)
		}
	}

	if diff := diffFloats(want, got, 0.0000001); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}
// TestKAMAWarmCount verifies that KAMA reports Warmed after exactly
// WarmCount samples have been added.
func TestKAMAWarmCount(t *testing.T) {
	const period = 9
	kama := NewKAMA(period)

	samples := 0
	for samples = 0; samples < period*10; samples++ {
		kama.Add(float64(samples))
		if kama.Warmed() {
			break
		}
	}

	if got, want := samples, kama.WarmCount(); got != want {
		t.Errorf("unexpected warm count: got=%d want=%d", got, want)
	}
}
// BenchmarkKAMAVal is a package-level sink that keeps the compiler from
// eliminating the benchmarked computation.
var BenchmarkKAMAVal float64

// BenchmarkKAMA measures the cost of constructing a KAMA and feeding it a
// short input series.
func BenchmarkKAMA(b *testing.B) {
	input := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	for iter := 0; iter < b.N; iter++ {
		kama := NewKAMA(5)
		for _, v := range input {
			BenchmarkKAMAVal = kama.Add(v)
		}
	}
}

View File

@ -0,0 +1,48 @@
package gota
// RSI - Relative Strength Index (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi)
//
// Upward and downward moves are smoothed independently; the index is
// derived from their ratio in Last.
type RSI struct {
	emaUp   EMA     // smoothed average of upward moves
	emaDown EMA     // smoothed average of downward moves
	lastV   float64 // previous raw sample, used to split the next move into up/down
}
// NewRSI constructs a new RSI over inTimePeriod samples.
//
// The underlying EMAs use Wilder's smoothing (alpha = 1/period), which is
// why the constructor overrides the alpha computed by NewEMA.
func NewRSI(inTimePeriod int, warmType WarmupType) *RSI {
	ema := NewEMA(inTimePeriod+1, warmType)
	ema.alpha = 1 / float64(inTimePeriod)
	// Both sides start from identical copies of the configured EMA.
	return &RSI{
		emaUp:   *ema,
		emaDown: *ema,
	}
}
// WarmCount returns the number of samples that must be provided for the
// algorithm to be fully "warmed". Both EMAs warm at the same rate, so the
// up-side EMA is representative.
func (rsi RSI) WarmCount() int {
	return rsi.emaUp.WarmCount()
}
// Warmed indicates whether the algorithm has enough data to generate
// accurate results. Both EMAs warm at the same rate, so the up-side EMA
// is representative.
func (rsi RSI) Warmed() bool {
	return rsi.emaUp.Warmed()
}
// Last returns the last output value: 100 - 100/(1+RS), where RS is the
// ratio of smoothed up-moves to smoothed down-moves.
func (rsi RSI) Last() float64 {
	rs := rsi.emaUp.Last() / rsi.emaDown.Last()
	return 100 - (100 / (1 + rs))
}
// Add adds a new sample value to the algorithm and returns the computed value.
func (rsi *RSI) Add(v float64) float64 {
	// Split the move since the previous sample into its up and down
	// components; an unchanged value contributes zero to both.
	var up, down float64
	switch {
	case v > rsi.lastV:
		up = v - rsi.lastV
	case v < rsi.lastV:
		down = rsi.lastV - v
	}

	rsi.emaUp.Add(up)
	rsi.emaDown.Add(down)
	rsi.lastV = v

	return rsi.Last()
}

View File

@ -0,0 +1,23 @@
package gota
import "testing"
// TestRSI checks RSI output for a ramp-up/ramp-down input series against
// reference values.
func TestRSI(t *testing.T) {
	input := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// Expected values generated with the reference talib implementation:
	// expList, _ := talib.Rsi(list, 10, nil)
	want := []float64{100, 100, 100, 100, 100, 90, 81, 72.89999999999999, 65.61, 59.04899999999999, 53.144099999999995, 47.82969, 43.04672099999999, 38.74204889999999, 34.86784400999999, 31.381059608999994, 28.242953648099995, 25.418658283289997, 22.876792454961}

	rsi := NewRSI(10, WarmSMA)
	var got []float64
	for _, v := range input {
		out := rsi.Add(v)
		if rsi.Warmed() {
			got = append(got, out)
		}
	}

	if diff := diffFloats(want, got, 0.0000001); diff != "" {
		t.Errorf("unexpected floats:\n%s", diff)
	}
}

Some files were not shown because too many files have changed in this diff Show More