chore: upgrade Go to v1.19.3 (1.x) (#23941)

* chore: upgrade Go to 1.19.3

This re-runs ./generate.sh and ./checkfmt.sh to format and update
source code (this is primarily responsible for the huge diff).

* fix: update tests to reflect sorting algorithm change
Brandon Pfeifer 2022-11-28 12:15:47 -05:00 committed by GitHub
parent e68b64c57b
commit e484c4d871
43 changed files with 141 additions and 159 deletions
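
Most of the diff below is mechanical: the gofmt shipped with Go 1.19 rewrites doc comments to the new doc comment syntax, indenting list items, prefixing section headings with "# ", tab-indenting code examples, separating "//go:" directives from prose, and dropping stray empty "//" lines. A minimal sketch of the style the formatter now produces (this file is an illustration, not part of the commit):

```
// Package example illustrates the doc comment conventions that gofmt
// enforces as of Go 1.19.
//
// # Headings
//
// A line of the form "# Title" renders as a heading, and lists are
// written with indented "-" items:
//   - first item
//   - second item
//
// Code blocks are indented with a single tab:
//
//	err := doThing()
package example
```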


@ -6,7 +6,7 @@ parameters:
# when updating the go version, should also update the go version in go.mod
description: docker tag for cross build container from quay.io . Created by https://github.com/influxdata/edge/tree/master/dockerfiles/cross-builder .
type: string
default: go1.18.7-f2a580ca8029f26f2c8a2002d6851967808bf96d
default: go1.19.3-7bc83382e2fdcefe13a8bf3e0367745901c0a790
workflow:
type: string


@ -1461,7 +1461,6 @@ func (f *formatter) valueBuf(i, j int, typ flux.ColType, cr flux.ColReader) []by
// * common tags sorted by label
// * other tags sorted by label
// * value
//
type orderedCols struct {
indexMap []int
cols []flux.ColMeta


@ -22,7 +22,7 @@ func (el *ErrorList) Add(err error) {
el.errs = append(el.errs, err)
}
//Err returns whether or not an error list is an error.
// Err returns whether or not an error list is an error.
func (el *ErrorList) Err() error {
if len(el.errs) == 0 {
return nil

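The ErrorList shown above collects errors and collapses an empty list to nil. A hedged usage sketch; the Err body below is a placeholder, since the real method aggregates the whole list:

```
package example

// ErrorList matches the shape visible in the hunk above.
type ErrorList struct{ errs []error }

func (el *ErrorList) Add(err error) { el.errs = append(el.errs, err) }

func (el *ErrorList) Err() error {
	if len(el.errs) == 0 {
		return nil
	}
	return el.errs[0] // placeholder; the real method reports every error
}

// stepOne and stepTwo are hypothetical stand-ins for real work.
func stepOne() error { return nil }
func stepTwo() error { return nil }

// runSteps records every failure and reports once at the end.
func runSteps() error {
	var el ErrorList
	for _, step := range []func() error{stepOne, stepTwo} {
		if err := step(); err != nil {
			el.Add(err)
		}
	}
	return el.Err() // nil when nothing was added
}
```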

@ -280,11 +280,11 @@ type Options struct {
// GetConfigPath returns the config path from the options.
// It will return a path by searching in this order:
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
func (opt *Options) GetConfigPath() string {
if opt.ConfigPath != "" {
if opt.ConfigPath == os.DevNull {

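The lookup order documented above is straightforward to mirror. A minimal sketch under stated assumptions: the helper below is illustrative, not the real GetConfigPath, and the file-existence check is an assumption:

```
package example

import (
	"os"
	"path/filepath"
)

// configPath mirrors the documented order: CLI value first, then the
// INFLUXDB_CONFIG_PATH environment variable, then the first
// influxdb.conf found in the listed directories.
func configPath(cliPath string) string {
	if cliPath != "" {
		return cliPath
	}
	if p := os.Getenv("INFLUXDB_CONFIG_PATH"); p != "" {
		return p
	}
	home, _ := os.UserHomeDir()
	for _, dir := range []string{filepath.Join(home, ".influxdb"), "/etc/influxdb"} {
		p := filepath.Join(dir, "influxdb.conf")
		if _, err := os.Stat(p); err == nil {
			return p
		}
	}
	return ""
}
```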

@ -255,8 +255,8 @@ func (l sgList) Covers(t time.Time) bool {
// to start time. Therefore, if there are multiple shard groups that match
// this point's time they will be preferred in this order:
//
// - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time.
// - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time.
func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo {
if l.items.Len() == 0 {
return nil

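The preference rule above amounts to a two-key comparison. A sketch, assuming shard groups expose StartTime and EndTime fields as meta.ShardGroupInfo does:

```
package example

import (
	"sort"
	"time"
)

type shardGroup struct{ StartTime, EndTime time.Time }

// preferredOrder sorts candidates per the rule above: earliest end time
// first; on identical end times, earliest start time.
func preferredOrder(groups []shardGroup) {
	sort.Slice(groups, func(i, j int) bool {
		if groups[i].EndTime.Equal(groups[j].EndTime) {
			return groups[i].StartTime.Before(groups[j].StartTime)
		}
		return groups[i].EndTime.Before(groups[j].EndTime)
	})
}
```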

@ -671,10 +671,8 @@ func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bo
return pn, false, nil
}
//
// Push Down of window aggregates.
// ReadRangePhys |> window |> { min, max, mean, count, sum }
//
type PushDownWindowAggregateRule struct{}
func (PushDownWindowAggregateRule) Name() string {
@ -863,10 +861,8 @@ func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (p
}), true, nil
}
//
// Push Down of group aggregates.
// ReadGroupPhys |> { count }
//
type PushDownGroupAggregateRule struct{}
func (PushDownGroupAggregateRule) Name() string {


@ -1170,9 +1170,7 @@ func meanProcedureSpec() *universe.MeanProcedureSpec {
}
}
//
// Window Aggregate Testing
//
func TestPushDownWindowAggregateRule(t *testing.T) {
createRangeSpec := func() *influxdb.ReadRangePhysSpec {
return &influxdb.ReadRangePhysSpec{

go.mod

@ -1,6 +1,6 @@
module github.com/influxdata/influxdb
go 1.18
go 1.19
require (
collectd.org v0.3.0


@ -39,24 +39,31 @@ const (
// further help operators.
//
// To create a simple error,
// &Error{
// Code:ENotFound,
// }
//
// &Error{
// Code:ENotFound,
// }
//
// To show where the error happens, add Op.
// &Error{
// Code: ENotFound,
// Op: "bolt.FindUserByID"
// }
//
// &Error{
// Code: ENotFound,
// Op: "bolt.FindUserByID"
// }
//
// To show an error with a unpredictable value, add the value in Msg.
// &Error{
// Code: EConflict,
// Message: fmt.Sprintf("organization with name %s already exist", aName),
// }
//
// &Error{
// Code: EConflict,
// Message: fmt.Sprintf("organization with name %s already exist", aName),
// }
//
// To show an error wrapped with another error.
// &Error{
// Code:EInternal,
// Err: err,
// }.
//
// &Error{
// Code:EInternal,
// Err: err,
// }.
type Error struct {
Code string
Msg string

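For reference, the patterns in the doc comment above compile as written against the struct fields shown (Code and Msg, plus the Op and Err mentioned in the comment); note the comment writes Message where the field is Msg. A sketch, with the import path assumed:

```
package example

import (
	"fmt"

	// import path assumed for this sketch
	platform "github.com/influxdata/influxdb/v2/kit/platform/errors"
)

// findUserByID combines the Code, Op, and Msg patterns from the doc
// comment above.
func findUserByID(id string) error {
	return &platform.Error{
		Code: platform.ENotFound,
		Op:   "bolt.FindUserByID",
		Msg:  fmt.Sprintf("user with id %s not found", id),
	}
}
```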

@ -19,7 +19,8 @@ import (
// LogError adds a span log for an error.
// Returns unchanged error, so useful to wrap as in:
// return 0, tracing.LogError(err)
//
// return 0, tracing.LogError(err)
func LogError(span opentracing.Span, err error) error {
if err == nil {
return nil
@ -115,24 +116,25 @@ func (s *Span) Finish() {
// Context without parent span reference triggers root span construction.
// This function never returns nil values.
//
// Performance
// # Performance
//
// This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op.
// Jaeger timestamp and duration precision is only µs, so this is pretty negligible.
//
// Alternatives
// # Alternatives
//
// If this performance penalty is too much, try these, which are also demonstrated in benchmark tests:
// // Create a root span
// span := opentracing.StartSpan("operation name")
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Create a child span
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
// ctx := opentracing.ContextWithSpan(context.Background(), span)
// // Create a root span
// span := opentracing.StartSpan("operation name")
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Sugar to create a child span
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
// // Create a child span
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Sugar to create a child span
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
if ctx == nil {
panic("StartSpanFromContext called with nil context")

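Combining the two comments above, a typical instrumented function derives its span from the context, finishes it on return, and records failures with LogError. A sketch, with the import path and the work helper assumed:

```
package example

import (
	"context"

	// import path assumed for this sketch
	"github.com/influxdata/influxdb/v2/kit/tracing"
)

// work is a hypothetical stand-in for the traced operation.
func work(ctx context.Context) error { return nil }

// doWork derives a span from the context, finishes it on return, and
// records any failure on the span before passing the error up.
func doWork(ctx context.Context) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	if err := work(ctx); err != nil {
		return tracing.LogError(span, err)
	}
	return nil
}
```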

@ -23,9 +23,9 @@ func (f ClientFunc) Diagnostics() (*Diagnostics, error) {
// the values for each column, by row. This information is never written to an InfluxDB
// system and is display-only. An example showing, say, connections follows:
//
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
type Diagnostics struct {
Columns []string
Rows [][]interface{}

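The connections table in the comment above maps directly onto the two struct fields. A small construction sketch; the Diagnostics type is restated so the snippet stands alone:

```
package example

// Diagnostics restates the struct from the hunk above.
type Diagnostics struct {
	Columns []string
	Rows    [][]interface{}
}

// connectionDiagnostics builds the display-only table from the example
// in the doc comment.
func connectionDiagnostics() *Diagnostics {
	return &Diagnostics{
		Columns: []string{"source_ip", "source_port", "dest_ip", "dest_port"},
		Rows: [][]interface{}{
			{"182.1.0.2", 2890, "127.0.0.1", 38901},
			{"174.33.1.2", 2924, "127.0.0.1", 38902},
		},
	}
}
```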

@ -468,8 +468,9 @@ func Decode(dst *[240]uint64, v uint64) (n int, err error) {
// Decode writes the uncompressed values from src to dst. It returns the number
// of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeAll(dst, src []uint64) (value int, err error) {
j := 0
for _, v := range src {
@ -482,8 +483,9 @@ func DecodeAll(dst, src []uint64) (value int, err error) {
// DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number
// of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) {
if len(src)&7 != 0 {
return 0, errors.New("src length is not multiple of 8")

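The two hunks above show gofmt 1.19 separating the go:nocheckptr directive from the prose with an empty comment line while keeping it immediately adjacent to the declaration, which is how directives must be placed to take effect. A generic sketch using go:noinline:

```
package example

// Add is ordinary documentation prose. The blank comment line keeps it
// out of the directive's comment group, and the directive sits directly
// above the declaration it applies to.
//
//go:noinline
func Add(a, b int) int { return a + b }
```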

@ -2,11 +2,12 @@ package errors
// Capture is a wrapper function which can be used to capture errors from closing via a defer.
// An example:
// func Example() (err error) {
// f, _ := os.Open(...)
// defer errors.Capture(&err, f.Close)()
// ...
// return
//
// func Example() (err error) {
// f, _ := os.Open(...)
// defer errors.Capture(&err, f.Close)()
// ...
// return
//
// Doing this will result in the error from the f.Close() call being
// put in the error via a ptr, if the error is not nil

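Expanded into a compilable form, the Capture pattern documented above looks like the sketch below (import path assumed; the sketch also assumes Capture does not overwrite an error that is already set):

```
package example

import (
	"os"

	// import path assumed for this sketch
	"github.com/influxdata/influxdb/v2/pkg/errors"
)

// readConfig returns any Close failure through the named return, as the
// doc comment describes.
func readConfig(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer errors.Capture(&err, f.Close)()

	// ... use f ...
	return nil
}
```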

@ -4,10 +4,10 @@
//
// The differences are that the implementation in this package:
//
// * uses an AMD64 optimised xxhash algorithm instead of murmur;
// * uses some AMD64 optimisations for things like clz;
// * works with []byte rather than a Hash64 interface, to reduce allocations;
// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
// - uses an AMD64 optimised xxhash algorithm instead of murmur;
// - uses some AMD64 optimisations for things like clz;
// - works with []byte rather than a Hash64 interface, to reduce allocations;
// - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
//
// Based on some rough benchmarking, this implementation of HyperLogLog++ is
// around twice as fast as the github.com/clarkduvall/hyperloglog implementation.


@ -62,7 +62,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write
return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw)
}
/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
// / Stream a single file to tw, using tarHeaderFileName instead of the actual filename
// e.g., when we want to write a *.tmp file using the original file's non-tmp name.
func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
h, err := tar.FileInfoHeader(f, f.Name())


@ -3,7 +3,7 @@ Package tracing provides a way for capturing hierarchical traces.
To start a new trace with a root span named select
trace, span := tracing.NewTrace("select")
trace, span := tracing.NewTrace("select")
It is recommended that a span be forwarded to callees using the
context package. Firstly, create a new context with the span associated
@ -21,6 +21,5 @@ Once the trace is complete, it may be converted to a graph with the Tree method.
The tree is intended to be used with the Walk function in order to generate
different presentations. The default Tree#String method returns a tree.
*/
package tracing


@ -50,7 +50,7 @@ func Bool(key string, val bool) Field {
}
}
/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
// / Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
return Field{
key: key,


@ -1,6 +1,5 @@
/*
Package wire is used to serialize a trace.
*/
package wire


@ -276,6 +276,7 @@ type Field struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
FieldType FieldType `protobuf:"varint,2,opt,name=FieldType,proto3,enum=wire.FieldType" json:"FieldType,omitempty"`
// Types that are assignable to Value:
//
// *Field_NumericVal
// *Field_StringVal
Value isField_Value `protobuf_oneof:"value"`


@ -60,9 +60,11 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int
// Otherwise one can decode the response body to get the error. For example:
// ```
// _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res))
// if err != nil {
// // we got some runtime error
// }
//
// if err != nil {
// // we got some runtime error
// }
//
// ```
type NoContentWithErrorDialect struct {
csv.ResultEncoderConfig


@ -1212,8 +1212,8 @@ func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint {
// FloatHoltWintersReducer forecasts a series into the future.
// This is done using the Holt-Winters damped method.
// 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations.
// 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations.
type FloatHoltWintersReducer struct {
// Season period
m int


@ -403,11 +403,10 @@ func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) {
// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
//
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type floatSortedMergeHeap struct {
opt IteratorOptions
items []*floatSortedMergeHeapItem
@ -3067,11 +3066,10 @@ func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) {
// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
//
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type integerSortedMergeHeap struct {
opt IteratorOptions
items []*integerSortedMergeHeapItem
@ -5731,11 +5729,10 @@ func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) {
// unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
//
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type unsignedSortedMergeHeap struct {
opt IteratorOptions
items []*unsignedSortedMergeHeapItem
@ -8395,11 +8392,10 @@ func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) {
// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
//
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type stringSortedMergeHeap struct {
opt IteratorOptions
items []*stringSortedMergeHeapItem
@ -11045,11 +11041,10 @@ func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) {
// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
//
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type booleanSortedMergeHeap struct {
opt IteratorOptions
items []*booleanSortedMergeHeapItem


@ -396,16 +396,18 @@ type nodes []*node
// less than a non-wildcard value.
//
// For example, the filters:
// "*.*"
// "servers.*"
// "servers.localhost"
// "*.localhost"
//
// "*.*"
// "servers.*"
// "servers.localhost"
// "*.localhost"
//
// Would be sorted as:
// "servers.localhost"
// "servers.*"
// "*.localhost"
// "*.*"
//
// "servers.localhost"
// "servers.*"
// "*.localhost"
// "*.*"
func (n *nodes) Less(j, k int) bool {
if (*n)[j].value == "*" && (*n)[k].value != "*" {
return false


@ -872,7 +872,6 @@ func (h *Handler) async(q *influxql.Query, results <-chan *query.Result) {
// in the database URL query value. It is encoded using a forward slash like
// "database/retentionpolicy" and we should be able to simply split that string
// on the forward slash.
//
func bucket2dbrp(bucket string) (string, string, error) {
// test for a slash in our bucket name.
switch idx := strings.IndexByte(bucket, '/'); idx {

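The split described above can be sketched with strings.Cut, available since Go 1.18. This illustrates the documented encoding only; the real bucket2dbrp handles its edge cases via the switch shown:

```
package example

import (
	"fmt"
	"strings"
)

// splitBucket splits "database/retentionpolicy" on the first slash. The
// no-slash and empty-database handling here is an assumption for the
// sketch.
func splitBucket(bucket string) (db, rp string, err error) {
	db, rp, _ = strings.Cut(bucket, "/")
	if db == "" {
		return "", "", fmt.Errorf("bucket %q has no database part", bucket)
	}
	return db, rp, nil
}
```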

@ -35,17 +35,17 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
}
// archiveProfilesAndQueries collects the following profiles:
// - goroutine profile
// - heap profile
// - blocking profile
// - mutex profile
// - (optionally) CPU profile
// - goroutine profile
// - heap profile
// - blocking profile
// - mutex profile
// - (optionally) CPU profile
//
// It also collects the following query results:
//
// - SHOW SHARDS
// - SHOW STATS
// - SHOW DIAGNOSTICS
// - SHOW SHARDS
// - SHOW STATS
// - SHOW DIAGNOSTICS
//
// All information is added to a tar archive and then compressed, before being
// returned to the requester as an archive file. Where profiles support debug
@ -60,7 +60,6 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
//
// The value after the `cpu` query parameter is not actually important, as long
// as there is something there.
//
func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) {
// prof describes a profile name and a debug value, or in the case of a CPU
// profile, the number of seconds to collect the profile for.


@ -78,7 +78,8 @@ func redactPassword(r *http.Request) {
// in addition to the common fields, we also append referrer, user agent,
// request ID and response time (microseconds)
// ie, in apache mod_log_config terms:
// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D
//
// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D
func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string {
redactPassword(r)


@ -4,7 +4,6 @@ import "testing"
// test of how we extract the database and retention policy from the bucket in
// our v2 api enpoint.
//
func TestV2DatabaseRetentionPolicyMapper(t *testing.T) {
tests := map[string]struct {
input string


@ -209,7 +209,6 @@ func (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) {
// This call is only idempotent when the caller provides the exact same
// retention policy, and that retention policy is already the default for the
// database.
//
func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) {
if spec == nil {
return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec")


@ -346,7 +346,8 @@ func (s *Service) handleConn(conn net.Conn) {
// handleTelnetConn accepts OpenTSDB's telnet protocol.
// Each telnet command consists of a line of the form:
// put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
//
// put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
func (s *Service) handleTelnetConn(conn net.Conn) {
defer conn.Close()
defer atomic.AddInt64(&s.stats.ActiveTelnetConnections, -1)

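Each telnet command above is a whitespace-separated put line. A toy parser for the documented format; this is an illustration, not the service's actual parsing code:

```
package example

import (
	"fmt"
	"strings"
)

// parsePut splits the OpenTSDB telnet line from the doc comment:
//
//	put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
func parsePut(line string) (metric, ts, value string, tags map[string]string, err error) {
	fields := strings.Fields(line)
	if len(fields) < 4 || fields[0] != "put" {
		return "", "", "", nil, fmt.Errorf("malformed put line: %q", line)
	}
	metric, ts, value = fields[1], fields[2], fields[3]
	tags = make(map[string]string)
	for _, kv := range fields[4:] {
		k, v, ok := strings.Cut(kv, "=")
		if !ok {
			return "", "", "", nil, fmt.Errorf("malformed tag %q", kv)
		}
		tags[k] = v
	}
	return metric, ts, value, tags, nil
}
```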

@ -412,7 +412,6 @@ func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPol
// the json buffer and the conn.
//
// we return that buffer sans the newline at the beginning.
//
func (s *Service) readRequest(r io.Reader) (*Request, []byte, error) {
var req Request
d := json.NewDecoder(r)


@ -33,10 +33,9 @@ func RewriteExprRemoveFieldKeyAndValue(expr influxql.Expr) influxql.Expr {
//
// This condition is determined when the following is true:
//
// * there is only one occurrence of the tag key `_measurement`.
// * there are no OR operators in the expression tree.
// * the operator for the `_measurement` binary expression is ==.
//
// - there is only one occurrence of the tag key `_measurement`.
// - there are no OR operators in the expression tree.
// - the operator for the `_measurement` binary expression is ==.
func HasSingleMeasurementNoOR(expr influxql.Expr) (string, bool) {
var lastMeasurement string
foundOnce := true


@ -200,6 +200,7 @@ type Node struct {
NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=influxdata.platform.storage.Node_Type" json:"node_type,omitempty"`
Children []*Node `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"`
// Types that are assignable to Value:
//
// *Node_StringValue
// *Node_BooleanValue
// *Node_IntegerValue


@ -1114,6 +1114,7 @@ type ReadResponse_Frame struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Data:
//
// *ReadResponse_Frame_Group
// *ReadResponse_Frame_Series
// *ReadResponse_Frame_FloatPoints
@ -1625,6 +1626,7 @@ type ReadResponse_AnyPoints struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Data:
//
// *ReadResponse_AnyPoints_Floats
// *ReadResponse_AnyPoints_Integers
// *ReadResponse_AnyPoints_Unsigneds


@ -151,13 +151,13 @@ group:
group:
tag key : _m,tag0,tag1
partition key: val11
series: _m=cpu,tag0=val01,tag1=val11
series: _m=cpu,tag0=val00,tag1=val11
series: _m=cpu,tag0=val01,tag1=val11
group:
tag key : _m,tag0,tag1
partition key: val12
series: _m=cpu,tag0=val01,tag1=val12
series: _m=cpu,tag0=val00,tag1=val12
series: _m=cpu,tag0=val01,tag1=val12
group:
tag key : _m,tag0
partition key: <nil>
@ -347,8 +347,8 @@ group:
exp: `group:
tag key : _m,tag1,tag2
partition key: <nil>,val20
series: _m=mem,tag1=val11,tag2=val20
series: _m=mem,tag1=val10,tag2=val20
series: _m=mem,tag1=val11,tag2=val20
group:
tag key : _m,tag1,tag2
partition key: <nil>,val21
@ -356,10 +356,10 @@ group:
group:
tag key : _m,tag0,tag1
partition key: val00,<nil>
series: _m=aaa,tag0=val00
series: _m=cpu,tag0=val00,tag1=val10
series: _m=cpu,tag0=val00,tag1=val11
series: _m=cpu,tag0=val00,tag1=val12
series: _m=aaa,tag0=val00
group:
tag key : _m,tag0
partition key: val01,<nil>

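The expected-output changes above come from the second commit-message item: Go 1.19 switched sort.Sort and sort.Slice to pdqsort, so elements that compare equal can come out in a different relative order than under earlier releases. A minimal demonstration:

```
package main

import (
	"fmt"
	"sort"
)

type row struct{ partitionKey, series string }

func main() {
	rows := []row{
		{"val11", "_m=cpu,tag0=val01,tag1=val11"},
		{"val11", "_m=cpu,tag0=val00,tag1=val11"},
	}
	// Both rows share one partition key, so this comparator reports them
	// equal and sort.Slice guarantees nothing about their relative order;
	// pdqsort may emit them differently than the old algorithm did.
	sort.Slice(rows, func(i, j int) bool { return rows[i].partitionKey < rows[j].partitionKey })
	fmt.Println(rows)

	// sort.SliceStable preserves input order for equal elements when a
	// deterministic order is required.
}
```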

@ -1210,7 +1210,7 @@ func TestIndex_SeriesIDSet(t *testing.T) {
}
// Ensures that deleting series from TSM files with multiple fields removes all the
/// series
// / series
func TestEngine_DeleteSeries(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {


@ -29,7 +29,6 @@ const partitions = 16
//
// To determine the partition that a series key should be added to, the series
// key is hashed and the first 8 bits are used as an index to the ring.
//
type ring struct {
// The unique set of partitions in the ring.
// len(partitions) <= len(continuum)
@ -40,8 +39,7 @@ type ring struct {
// power of 2, and for performance reasons should be larger than the number of
// cores on the host. The supported set of values for n is:
//
// {1, 2, 4, 8, 16}.
//
// {1, 2, 4, 8, 16}.
func newring(n int) (*ring, error) {
if n <= 0 || n > partitions {
return nil, fmt.Errorf("invalid number of partitions: %d", n)


@ -11,8 +11,8 @@ import (
const MaxFieldValueLength = 1048576
// ValidateFields will return a PartialWriteError if:
// - the point has inconsistent fields, or
// - the point has fields that are too long
// - the point has inconsistent fields, or
// - the point has fields that are too long
func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error {
pointSize := point.StringSize()
iter := point.FieldIterator()


@ -1,9 +1,8 @@
/*
Package tsi1 provides a memory-mapped index implementation that supports
high cardinality series.
Overview
# Overview
The top-level object in tsi1 is the Index. It is the primary access point from
the rest of the system. The Index is composed of LogFile and IndexFile objects.
@ -17,8 +16,7 @@ Index files also contain series information, however, they are highly indexed
so that reads can be performed quickly. Index files are built through a process
called compaction where a log file or multiple index files are merged together.
Operations
# Operations
The index can perform many tasks related to series, measurement, & tag data.
All data is inserted by adding a series to the index. When adding a series,
@ -34,8 +32,7 @@ as by measurement name, by tag value, or by using regular expressions. The
index provides an API to iterate over subsets of series and perform set
operations such as unions and intersections.
Log File Layout
# Log File Layout
The write-ahead file that series initially are inserted into simply appends
all new operations sequentially. It is simply composed of a series of log
@ -61,15 +58,13 @@ name, the tag set, and a checksum.
When the log file is replayed, if the checksum is incorrect or the entry is
incomplete (because of a partially failed write) then the log is truncated.
Index File Layout
# Index File Layout
The index file is composed of 3 main block types: one series block, one or more
tag blocks, and one measurement block. At the end of the index file is a
trailer that records metadata such as the offsets to these blocks.
Series Block Layout
# Series Block Layout
The series block stores raw series keys in sorted order. It also provides hash
indexes so that series can be looked up quickly. Hash indexes are inserted
@ -111,8 +106,7 @@ a trailer which contains metadata about the block.
Tag Block Layout
# Tag Block Layout
After the series block is one or more tag blocks. One of these blocks exists
for every measurement in the index file. The block is structured as a sorted
@ -159,8 +153,7 @@ that value. Series iterators can be built around a single tag key value or
multiple iterators can be merged with set operators such as union or
intersection.
Measurement block
# Measurement block
The measurement block stores a sorted list of measurements, their associated
series offsets, and the offset to their tag block. This allows all series for
@ -188,8 +181,7 @@ measurements.
Manifest file
# Manifest file
The index is simply an ordered set of log and index files. These files can be
merged together or rewritten but their order must always be the same. This is
@ -200,8 +192,7 @@ Whenever the set of active files is changed, a manifest file is written to
track the set. The manifest specifies the ordering of files and, on startup,
all files not in the manifest are removed from the index directory.
Compacting index files
# Compacting index files
Compaction is the process of taking files and merging them together into a
single file. There are two stages of compaction within TSI.
@ -216,8 +207,7 @@ they are all merged together into a single index file and the old files are
discarded. Because all blocks are written in sorted order, the new index file
can be streamed and minimize memory use.
Concurrency
# Concurrency
Index files are immutable so they do not require fine grained locks, however,
compactions require that we track which files are in use so they are not
@ -232,7 +222,5 @@ returns to zero.
Besides the reference counting, there are no other locking mechanisms when
reading or writing index files. Log files, however, do require a lock whenever
they are accessed. This is another reason to minimize log file size.
*/
package tsi1


@ -55,7 +55,6 @@ func init() {
//
// NOTE: Currently, this must not be change once a database is created. Further,
// it must also be a power of 2.
//
var DefaultPartitionN uint64 = 8
// An IndexOption is a functional option for changing the configuration of


@ -643,7 +643,7 @@ func BenchmarkIndexSet_TagSets(b *testing.B) {
// This benchmark concurrently writes series to the index and fetches cached bitsets.
// The idea is to emphasize the performance difference when bitset caching is on and off.
//
// Typical results for an i7 laptop
// # Typical results for an i7 laptop
//
// BenchmarkIndex_ConcurrentWriteQuery/inmem/queries_100000/cache-8 1 5963346204 ns/op 2499655768 B/op 23964183 allocs/op
// BenchmarkIndex_ConcurrentWriteQuery/inmem/queries_100000/no_cache-8 1 5314841090 ns/op 2499495280 B/op 23963322 allocs/op


@ -162,8 +162,6 @@ var set *SeriesIDSet
// BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op
//
//
func BenchmarkSeriesIDSet_AddMore(b *testing.B) {
cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000}
@ -202,7 +200,6 @@ func BenchmarkSeriesIDSet_AddMore(b *testing.B) {
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_global_lock-8 2000000 914 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_multi_lock-8 30000000 39.7 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_multi_lock-8 1000000 1002 ns/op 0 B/op 0 allocs/op
//
func BenchmarkSeriesIDSet_Add(b *testing.B) {
// Setup...
set = NewSeriesIDSet()
@ -523,7 +520,6 @@ func BenchmarkSeriesIDSet_AddMany(b *testing.B) {
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op
//
func BenchmarkSeriesIDSet_Remove(b *testing.B) {
// Setup...
set = NewSeriesIDSet()


@ -209,7 +209,7 @@ func (s *Shard) SetEnabled(enabled bool) {
s.mu.Unlock()
}
//! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling.
// ! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling.
func (s *Shard) setEnabledNoLock(enabled bool) {
// Prevent writes and queries
s.enabled = enabled


@ -1240,7 +1240,6 @@ func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (est
//
// Cardinality is calculated exactly by unioning all shards' bitsets of series
// IDs. The result of this method cannot be combined with any other results.
//
func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) {
s.mu.RLock()
shards := s.filterShards(byDatabase(database))