chore(storage,tsdb): fix megacheck errors

pull/10616/head
Mark Rushakoff 2018-11-01 09:51:20 -07:00
parent fe84ff7357
commit 985c260af7
10 changed files with 9 additions and 77 deletions

View File

@ -320,8 +320,6 @@ func toComparisonOperator(o ast.OperatorKind) (datatypes.Node_Comparison, error)
}
}
// measurementRemap rewrites the storage tag key "_measurement" to the
// influxql-facing key "_name"; presumably passed as the remap argument to
// NodeToExpr below — confirm against callers.
var measurementRemap = map[string]string{"_measurement": "_name"}
// NodeToExpr transforms a predicate node to an influxql.Expr.
func NodeToExpr(node *datatypes.Node, remap map[string]string) (influxql.Expr, error) {
v := &nodeToExprVisitor{remap: remap}

View File

@ -17,7 +17,6 @@ import (
type indexSeriesCursor struct {
sqry storage.SeriesCursor
err error
tags models.Tags
cond influxql.Expr
row reads.SeriesRow
eof bool

View File

@ -2,6 +2,7 @@ package readservice
import (
"context"
"github.com/influxdata/platform/query/functions/outputs"
"github.com/influxdata/flux"
@ -17,7 +18,7 @@ import (
)
func NewProxyQueryService(engine *storage.Engine, bucketSvc platform.BucketService, orgSvc platform.OrganizationService, logger *zap.Logger) (query.ProxyQueryService, error) {
var ( // flux
var (
concurrencyQuota = 10
memoryBytesQuota = 1e6
)

View File

@ -220,8 +220,7 @@ func BenchmarkIndex_TagSets(b *testing.B) {
name := []byte("m4")
opt := query.IteratorOptions{Condition: influxql.MustParseExpr(`"tag5"::tag = 'value0'`)}
var ts func() ([]*query.TagSet, error)
ts = func() ([]*query.TagSet, error) {
ts := func() ([]*query.TagSet, error) {
return idx.Index.TagSets(name, opt)
}

View File

@ -513,12 +513,4 @@ func SeriesKeySize(name []byte, tags models.Tags) int {
return n
}
// seriesKeys is a sortable collection of series keys. It implements
// sort.Interface, ordering keys by CompareSeriesKeys.
type seriesKeys [][]byte
// Len returns the number of keys in the collection.
func (a seriesKeys) Len() int { return len(a) }
// Swap exchanges the keys at indexes i and j.
func (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Less reports whether the key at index i sorts strictly before the key at
// index j, as determined by CompareSeriesKeys returning -1.
func (a seriesKeys) Less(i, j int) bool {
return CompareSeriesKeys(a[i], a[j]) == -1
}
// nop is an intentionally empty function.
// NOTE(review): appears unused in this view; staticcheck would flag it
// (U1000) — confirm there are no callers before removing.
func nop() {}

View File

@ -12,7 +12,7 @@ const (
seriesIDValueMask = 0xFFFFFFFF // series ids numerically are 32 bits
seriesIDTypeShift = 32 // we put the type right after the value info
seriesIDTypeMask = 0xFF << seriesIDTypeShift // a mask for the type byte
seriesIDSize = 8
seriesIDSize = 8 //lint:ignore U1000 This const is used in a discarded compile-time type assertion.
)
// SeriesID is the type of a series id. It is logically a uint64, but encoded as a struct so

View File

@ -251,8 +251,9 @@ func ReadMeasurementBlockTrailer(data []byte) (MeasurementBlockTrailer, error) {
t.HashIndex.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]
t.HashIndex.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]
// Skip over old sketch info
buf = buf[4*8:]
// We would advance past old sketch info, but that's unused now.
_ = buf
// buf = buf[4*8:]
return t, nil
}

View File

@ -118,13 +118,6 @@ func dumpBufs(a, b []byte) {
fmt.Println()
}
// dumpBuf writes each byte of b to stdout as "<index> <binary>", one byte
// per line, followed by a trailing blank line. Debug helper for inspecting
// encoder output.
func dumpBuf(b []byte) {
	for i := 0; i < len(b); i++ {
		fmt.Printf("%d %08b\n", i, b[i])
	}
	fmt.Println()
}
func TestFloatArrayEncodeAll_NaN(t *testing.T) {
examples := [][]float64{
{1.0, math.NaN(), 2.0},
@ -179,7 +172,7 @@ func Test_FloatArrayEncodeAll_Quick(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
if got, exp := result, src[:len(src)]; !reflect.DeepEqual(got, exp) {
if got, exp := result, src[:]; !reflect.DeepEqual(got, exp) {
t.Fatalf("got result %v, expected %v", got, exp)
}
return true

View File

@ -34,13 +34,6 @@ func dumpBufs(a, b []byte) {
fmt.Println()
}
// dumpBuf writes each byte of b to stdout as "<index> <binary> (<decimal>)",
// one byte per line, followed by a trailing blank line. Debug helper for
// inspecting encoder output.
func dumpBuf(b []byte) {
	for i := 0; i < len(b); i++ {
		fmt.Printf("%[1]d %08[2]b (%[2]d)\n", i, b[i])
	}
	fmt.Println()
}
func TestIntegerArrayEncodeAll_NoValues(t *testing.T) {
b, err := IntegerArrayEncodeAll(nil, nil)
if err != nil {
@ -650,9 +643,7 @@ func TestIntegerArrayEncodeAll_Quick(t *testing.T) {
// Copy over values to compare result—src is modified...
exp := make([]int64, 0, len(src))
for _, v := range src {
exp = append(exp, v)
}
exp = append(exp, src...)
// Retrieve encoded bytes from encoder.
b, err := IntegerArrayEncodeAll(src, nil)

View File

@ -641,48 +641,6 @@ func (e *Engine) Free() error {
return e.FileStore.Free()
}
// addToIndexFromKey will pull the measurement names, series keys, and field
// names from composite keys, and add them to the database index and measurement
// fields.
// addToIndexFromKey will pull the measurement names, series keys, and field
// names from composite keys, and add them to the database index and
// measurement fields.
func (e *Engine) addToIndexFromKey(keys [][]byte, fieldTypes []influxql.DataType) error {
	collection := &tsdb.SeriesCollection{
		Keys:  keys,
		Names: make([][]byte, 0, len(keys)),
		Tags:  make([]models.Tags, 0, len(keys)),
		Types: make([]models.FieldType, 0, len(keys)),
	}
	for i := range keys {
		// Replace the tsm key format with the index key format. Note that
		// collection.Keys shares its backing array with keys, so this also
		// rewrites keys[i] in place before the ParseTags call below.
		collection.Keys[i], _ = SeriesAndFieldFromCompositeKey(collection.Keys[i])
		collection.Names = append(collection.Names, models.ParseName(collection.Keys[i]))
		collection.Tags = append(collection.Tags, models.ParseTags(keys[i]))
		collection.Types = append(collection.Types, fieldTypeFromDataType(fieldTypes[i]))
	}
	// Register the assembled series with the index; any error is returned as-is.
	return e.index.CreateSeriesListIfNotExists(collection)
}
// fieldTypeFromDataType converts an influxql.DataType into the corresponding
// models.FieldType. Any type without a direct mapping yields models.Empty.
func fieldTypeFromDataType(dataType influxql.DataType) models.FieldType {
	switch dataType {
	case influxql.Boolean:
		return models.Boolean
	case influxql.String:
		return models.String
	case influxql.Integer:
		return models.Integer
	case influxql.Float:
		return models.Float
	}
	// No concrete field type for anything else (tags, unknown, etc.).
	return models.Empty
}
// WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key.
func (e *Engine) WritePoints(points []models.Point) error {