fix: typos (#19734)

parent db97f1c1f6
commit 7dcaf5c639
@@ -19,7 +19,7 @@ func (a *AuthorizerMock) AuthorizeDatabase(p influxql.Privilege, name string) bo
 	return a.AuthorizeDatabaseFn(p, name)
 }
 
-// AuthorizeQuery determins if the query can be executed against the provided
+// AuthorizeQuery determines if the query can be executed against the provided
 // database.
 func (a *AuthorizerMock) AuthorizeQuery(database string, query *influxql.Query) error {
 	return a.AuthorizeQueryFn(database, query)
@@ -28,7 +28,7 @@ type TSDBStoreMock struct {
 	DiskSizeFn                  func() (int64, error)
 	ExpandSourcesFn             func(sources influxql.Sources) (influxql.Sources, error)
 	ImportShardFn               func(id uint64, r io.Reader) error
-	MeasurementSeriesCountsFn   func(database string) (measuments int, series int)
+	MeasurementSeriesCountsFn   func(database string) (measurements int, series int)
 	MeasurementsCardinalityFn   func(database string) (int64, error)
 	MeasurementNamesFn          func(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error)
 	OpenFn                      func() error
@@ -95,7 +95,7 @@ func (s *TSDBStoreMock) ImportShard(id uint64, r io.Reader) error {
 func (s *TSDBStoreMock) MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) {
 	return s.MeasurementNamesFn(auth, database, cond)
 }
-func (s *TSDBStoreMock) MeasurementSeriesCounts(database string) (measuments int, series int) {
+func (s *TSDBStoreMock) MeasurementSeriesCounts(database string) (measurements int, series int) {
 	return s.MeasurementSeriesCountsFn(database)
 }
 func (s *TSDBStoreMock) MeasurementsCardinality(database string) (int64, error) {
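The TSDBStoreMock in the two hunks above follows a common Go pattern: a mock whose methods delegate to swappable function fields, so each test injects its own behavior without a mocking library. A minimal sketch of the same pattern, with hypothetical names (DiskSizer, StoreMock) not taken from this commit:

package main

import "fmt"

// DiskSizer is a hypothetical interface standing in for the real store.
type DiskSizer interface {
	DiskSize() (int64, error)
}

// StoreMock satisfies DiskSizer by delegating to a function field,
// so each test can swap in its own behavior.
type StoreMock struct {
	DiskSizeFn func() (int64, error)
}

func (s *StoreMock) DiskSize() (int64, error) { return s.DiskSizeFn() }

func main() {
	var ds DiskSizer = &StoreMock{
		DiskSizeFn: func() (int64, error) { return 4096, nil },
	}
	sz, err := ds.DiskSize()
	fmt.Println(sz, err) // 4096 <nil>
}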
@@ -501,7 +501,7 @@ func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) {
 	return j, nil
 }
 
-// canPack returs true if n elements from in can be stored using bits per element
+// canPack returns true if n elements from in can be stored using bits per element
 func canPack(src []uint64, n, bits int) bool {
 	if len(src) < n {
 		return false
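canPack belongs to the simple8b encoder, which packs runs of small integers into single 64-bit words. A hedged sketch of the feasibility test the name suggests, not the package's actual implementation: n values are packable at `bits` per element only if each value fits in that many bits.

package main

import "fmt"

// fits reports whether the first n values of src can each be stored in
// `bits` bits. A simplified sketch of the idea, not the simple8b code.
func fits(src []uint64, n, bits int) bool {
	if len(src) < n {
		return false
	}
	if bits >= 64 {
		return true
	}
	max := uint64(1)<<uint(bits) - 1 // largest value representable in `bits` bits
	for _, v := range src[:n] {
		if v > max {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(fits([]uint64{1, 2, 3, 7}, 4, 3)) // true: all values fit in 3 bits
	fmt.Println(fits([]uint64{1, 2, 3, 8}, 4, 3)) // false: 8 needs 4 bits
}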
@@ -85,7 +85,7 @@ func (p *LimitedBytes) Get(sz int) []byte {
 
 // Put returns a slice back to the pool. If the pool is full, the byte
 // slice is discarded. If the byte slice is over the configured max size
-// of any byte slice in the pool, it is discared.
+// of any byte slice in the pool, it is discarded.
 func (p *LimitedBytes) Put(c []byte) {
 	// Drop buffers that are larger than the max size
 	if cap(c) >= p.maxSize {
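LimitedBytes is a bounded byte-slice pool: Get hands out a buffer, and Put returns it unless the pool is full or the slice exceeds the configured max, which bounds total retained memory. A minimal sketch of the same idea built on a buffered channel (assumed shape, not the pool's real implementation):

package main

import "fmt"

// BytesPool is a sketch of a bounded byte-slice pool.
type BytesPool struct {
	pool    chan []byte
	maxSize int
}

func NewBytesPool(capacity, maxSize int) *BytesPool {
	return &BytesPool{pool: make(chan []byte, capacity), maxSize: maxSize}
}

// Get returns a pooled slice with capacity >= sz, or allocates a new one.
func (p *BytesPool) Get(sz int) []byte {
	select {
	case b := <-p.pool:
		if cap(b) >= sz {
			return b[:sz]
		}
	default:
	}
	return make([]byte, sz)
}

// Put returns a slice to the pool; oversized slices, and puts into a
// full pool, are silently discarded.
func (p *BytesPool) Put(b []byte) {
	if cap(b) >= p.maxSize {
		return // drop buffers larger than the max size
	}
	select {
	case p.pool <- b:
	default: // pool full: discard
	}
}

func main() {
	p := NewBytesPool(2, 1<<20)
	b := p.Get(128)
	fmt.Println(len(b), cap(b) >= 128) // 128 true
	p.Put(b)
}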
@@ -6,7 +6,7 @@ import (
 )
 
 // BooleanArrayEncodeAll encodes src into b, returning b and any error encountered.
-// The returned slice may be of a different length and capactity to b.
+// The returned slice may be of a different length and capacity to b.
 func BooleanArrayEncodeAll(src []bool, b []byte) ([]byte, error) {
 	sz := 1 + 8 + ((len(src) + 7) / 8) // Header + Num bools + bool data.
 	if len(b) < sz && cap(b) > sz {
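The sz arithmetic in this hunk reserves 1 header byte, 8 bytes for the bool count, and one bit per value rounded up to whole bytes via (len+7)/8. An illustrative sketch of that bit packing (assumed bit order; the real encoder may differ):

package main

import "fmt"

// packBools packs one bit per boolean, most significant bit first,
// mirroring the (len(src)+7)/8 sizing in the encoder above.
func packBools(src []bool) []byte {
	out := make([]byte, (len(src)+7)/8)
	for i, v := range src {
		if v {
			out[i/8] |= byte(0x80) >> uint(i%8)
		}
	}
	return out
}

func main() {
	b := packBools([]bool{true, false, true, true})
	fmt.Printf("%08b\n", b[0]) // 10110000
}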
@@ -10,7 +10,7 @@ import (
 )
 
 // FloatArrayEncodeAll encodes src into b, returning b and any error encountered.
-// The returned slice may be of a different length and capactity to b.
+// The returned slice may be of a different length and capacity to b.
 //
 // Currently only the float compression scheme used in Facebook's Gorilla is
 // supported, so this method implements a batch oriented version of that.
@@ -106,7 +106,7 @@ func FloatArrayEncodeAll(src []float64, b []byte) ([]byte, error) {
 			}
 
 			// Full value to write.
-			v := (vDelta >> prevTrailing) << (64 - l) // l least signifciant bits of v.
+			v := (vDelta >> prevTrailing) << (64 - l) // l least significant bits of v.
 
 			var m = n & 7 // Current bit in current byte.
 			var written uint64
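The corrected comment describes a classic bit-stream trick used by Gorilla-style float compression: after dropping the trailing bits already covered by the previous value's window, the l meaningful bits of the XOR delta are shifted to the top of a 64-bit word so the writer can peel them off byte by byte. A worked example with made-up values:

package main

import "fmt"

func main() {
	// Suppose the XOR of two floats' bit patterns has 3 trailing zero
	// bits the previous window accounts for, and 4 meaningful bits remain.
	var vDelta uint64 = 0b1011000 // example XOR delta
	prevTrailing := uint(3)       // trailing zeros to drop
	l := uint(4)                  // number of meaningful bits to emit

	v := (vDelta >> prevTrailing) << (64 - l) // l least significant bits, left-aligned
	fmt.Printf("%064b\n", v)                  // 1011 followed by 60 zero bits
}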
@@ -9,7 +9,7 @@ import (
 )
 
 // IntegerArrayEncodeAll encodes src into b, returning b and any error encountered.
-// The returned slice may be of a different length and capactity to b.
+// The returned slice may be of a different length and capacity to b.
 //
 // IntegerArrayEncodeAll implements batch oriented versions of the three integer
 // encoding types we support: uncompressed, simple8b and RLE.
@@ -26,7 +26,7 @@ func IntegerArrayEncodeAll(src []int64, b []byte) ([]byte, error) {
 
 	// To prevent an allocation of the entire block we're encoding reuse the
 	// src slice to store the encoded deltas.
-	deltas := reintepretInt64ToUint64Slice(src)
+	deltas := reinterpretInt64ToUint64Slice(src)
 	for i := len(deltas) - 1; i > 0; i-- {
 		deltas[i] = deltas[i] - deltas[i-1]
 		deltas[i] = ZigZagEncode(int64(deltas[i]))
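This hunk is the heart of the integer encoder: values are delta-encoded in place (back to front, so each element only depends on values not yet overwritten), then ZigZag-mapped so small negative deltas become small unsigned numbers. A self-contained sketch of the round trip; zigZagEncode here is the standard definition, assumed to match the package's ZigZagEncode:

package main

import "fmt"

// ZigZag maps signed integers to unsigned so values near zero stay
// small: 0, -1, 1, -2, 2 ... -> 0, 1, 2, 3, 4 ...
func zigZagEncode(x int64) uint64 { return uint64(x<<1) ^ uint64(x>>63) }
func zigZagDecode(v uint64) int64 { return int64(v>>1) ^ -int64(v&1) }

func main() {
	src := []int64{100, 103, 101, 106}

	// Delta-encode back to front, as in IntegerArrayEncodeAll.
	deltas := make([]uint64, len(src))
	deltas[0] = uint64(src[0])
	for i := len(src) - 1; i > 0; i-- {
		deltas[i] = zigZagEncode(src[i] - src[i-1])
	}
	fmt.Println(deltas) // [100 6 3 10]: deltas 3, -2, 5 zigzagged

	// Decode: undo zigzag, then prefix-sum.
	out := []int64{int64(deltas[0])}
	for i := 1; i < len(deltas); i++ {
		out = append(out, out[i-1]+zigZagDecode(deltas[i]))
	}
	fmt.Println(out) // [100 103 101 106]
}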
@@ -115,7 +115,7 @@ func IntegerArrayEncodeAll(src []int64, b []byte) ([]byte, error) {
 }
 
 // UnsignedArrayEncodeAll encodes src into b, returning b and any error encountered.
-// The returned slice may be of a different length and capactity to b.
+// The returned slice may be of a different length and capacity to b.
 //
 // UnsignedArrayEncodeAll implements batch oriented versions of the three integer
 // encoding types we support: uncompressed, simple8b and RLE.
@@ -124,7 +124,7 @@ func IntegerArrayEncodeAll(src []int64, b []byte) ([]byte, error) {
 // scratch space for delta encoded values. It is NOT SAFE to use src after
 // passing it into IntegerArrayEncodeAll.
 func UnsignedArrayEncodeAll(src []uint64, b []byte) ([]byte, error) {
-	srcint := reintepretUint64ToInt64Slice(src)
+	srcint := reinterpretUint64ToInt64Slice(src)
 	return IntegerArrayEncodeAll(srcint, b)
 }
 
@@ -160,8 +160,8 @@ func UnsignedArrayDecodeAll(b []byte, dst []uint64) ([]uint64, error) {
 		encoding = 3 // integerBatchDecodeAllInvalid
 	}
 
-	res, err := integerBatchDecoderFunc[encoding&3](b, reintepretUint64ToInt64Slice(dst))
-	return reintepretInt64ToUint64Slice(res), err
+	res, err := integerBatchDecoderFunc[encoding&3](b, reinterpretUint64ToInt64Slice(dst))
+	return reinterpretInt64ToUint64Slice(res), err
 }
 
 func integerBatchDecodeAllUncompressed(b []byte, dst []int64) ([]int64, error) {
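integerBatchDecoderFunc in this hunk is a fixed table of decoder functions indexed by the encoding nibble; values above the known encodings are first clamped to a sentinel "invalid" entry, so the &3 mask can never index out of range. A sketch of that dispatch shape with hypothetical stub decoders, not the package's real ones:

package main

import (
	"errors"
	"fmt"
)

type decodeFunc func(b []byte, dst []int64) ([]int64, error)

func decodeUncompressed(b []byte, dst []int64) ([]int64, error) { return dst, nil }
func decodeSimple(b []byte, dst []int64) ([]int64, error)       { return dst, nil }
func decodeRLE(b []byte, dst []int64) ([]int64, error)          { return dst, nil }
func decodeInvalid(b []byte, dst []int64) ([]int64, error) {
	return nil, errors.New("unknown encoding")
}

// Index 3 is the invalid sentinel: any encoding value > 2 is clamped
// onto it first, so encoding&3 always lands on a valid entry.
var decoders = [4]decodeFunc{decodeUncompressed, decodeSimple, decodeRLE, decodeInvalid}

func main() {
	b := []byte{0x30} // high nibble 3: not a real encoding
	encoding := b[0] >> 4
	if encoding > 2 {
		encoding = 3
	}
	_, err := decoders[encoding&3](b, nil)
	fmt.Println(err) // unknown encoding
}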
@@ -208,7 +208,7 @@ func integerBatchDecodeAllSimple(b []byte, dst []int64) ([]int64, error) {
 	dst[0] = ZigZagDecode(binary.BigEndian.Uint64(b))
 
 	// decode compressed values
-	buf := reintepretInt64ToUint64Slice(dst)
+	buf := reinterpretInt64ToUint64Slice(dst)
 	n, err := simple8b.DecodeBytesBigEndian(buf[1:], b[8:])
 	if err != nil {
 		return []int64{}, err
@@ -281,10 +281,10 @@ func integerBatchDecodeAllInvalid(b []byte, _ []int64) ([]int64, error) {
 	return []int64{}, fmt.Errorf("unknown encoding %v", b[0]>>4)
 }
 
-func reintepretInt64ToUint64Slice(src []int64) []uint64 {
+func reinterpretInt64ToUint64Slice(src []int64) []uint64 {
 	return *(*[]uint64)(unsafe.Pointer(&src))
 }
 
-func reintepretUint64ToInt64Slice(src []uint64) []int64 {
+func reinterpretUint64ToInt64Slice(src []uint64) []int64 {
 	return *(*[]int64)(unsafe.Pointer(&src))
 }
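The renamed helpers reinterpret a []int64 as []uint64 (and back) with zero copying by casting the slice header through unsafe.Pointer; this is sound here because both element types share size and alignment, so the same backing array serves both views. A sketch of the cast plus, as an assumed alternative not used by this commit, the more explicit unsafe.Slice form available since Go 1.17:

package main

import (
	"fmt"
	"unsafe"
)

// int64ToUint64 reinterprets the slice header in place, as in the commit.
func int64ToUint64(src []int64) []uint64 {
	return *(*[]uint64)(unsafe.Pointer(&src))
}

// int64ToUint64Alt does the same via unsafe.Slice (Go 1.17+), making the
// element-pointer cast and length explicit.
func int64ToUint64Alt(src []int64) []uint64 {
	if len(src) == 0 {
		return nil
	}
	return unsafe.Slice((*uint64)(unsafe.Pointer(&src[0])), len(src))
}

func main() {
	a := []int64{-1, 2}
	u := int64ToUint64(a)
	fmt.Println(u[0]) // 18446744073709551615: same bits, unsigned view
	u[1] = 7
	fmt.Println(a[1])                // 7: both slices share the backing array
	fmt.Println(int64ToUint64Alt(a)) // [18446744073709551615 7]
}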
@@ -20,11 +20,11 @@ var (
 )
 
 // StringArrayEncodeAll encodes src into b, returning b and any error encountered.
-// The returned slice may be of a different length and capactity to b.
+// The returned slice may be of a different length and capacity to b.
 //
 // Currently only the string compression scheme used snappy.
 func StringArrayEncodeAll(src []string, b []byte) ([]byte, error) {
-	srcSz64 := int64(2 + len(src)*binary.MaxVarintLen32) // strings should't be longer than 64kb
+	srcSz64 := int64(2 + len(src)*binary.MaxVarintLen32) // strings shouldn't be longer than 64kb
 	for i := range src {
 		srcSz64 += int64(len(src[i]))
 	}
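The corrected comment documents the encoder's size estimate: up to binary.MaxVarintLen32 bytes of length prefix per string plus the payloads themselves. A small sketch of that length-prefixed layout, leaving out the snappy compression pass the real encoder applies afterwards:

package main

import (
	"encoding/binary"
	"fmt"
)

// packStrings writes each string as a uvarint length followed by its bytes,
// matching the sizing logic in StringArrayEncodeAll (snappy step omitted).
func packStrings(src []string) []byte {
	sz := len(src) * binary.MaxVarintLen32 // worst-case length prefixes
	for i := range src {
		sz += len(src[i])
	}
	buf := make([]byte, 0, sz)
	var tmp [binary.MaxVarintLen32]byte
	for _, s := range src {
		n := binary.PutUvarint(tmp[:], uint64(len(s)))
		buf = append(buf, tmp[:n]...)
		buf = append(buf, s...)
	}
	return buf
}

func main() {
	b := packStrings([]string{"cpu", "mem"})
	fmt.Println(len(b), b[:4]) // 8 [3 99 112 117]
}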
@@ -31,7 +31,7 @@ func TimeArrayEncodeAll(src []int64, b []byte) ([]byte, error) {
 
 	// To prevent an allocation of the entire block we're encoding reuse the
 	// src slice to store the encoded deltas.
-	deltas := reintepretInt64ToUint64Slice(src)
+	deltas := reinterpretInt64ToUint64Slice(src)
 
 	if len(deltas) > 1 {
 		for i := len(deltas) - 1; i > 0; i-- {
@@ -3,7 +3,7 @@ package tsm1
 // Compactions are the process of creating read-optimized TSM files.
 // The files are created by converting write-optimized WAL entries
 // to read-optimized TSM format. They can also be created from existing
-// TSM files when there are tombstone records that neeed to be removed, points
+// TSM files when there are tombstone records that need to be removed, points
 // that were overwritten by later writes and need to updated, or multiple
 // smaller TSM files need to be merged to reduce file counts and improve
 // compression ratios.
@@ -110,7 +110,7 @@ type DefaultPlanner struct {
 	// compactFullWriteColdDuration specifies the length of time after
 	// which if no writes have been committed to the WAL, the engine will
 	// do a full compaction of the TSM files in this shard. This duration
-	// should always be greater than the CacheFlushWriteColdDuraion
+	// should always be greater than the CacheFlushWriteColdDuration
 	compactFullWriteColdDuration time.Duration
 
 	// lastPlanCheck is the last time Plan was called
@@ -255,7 +255,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
 		cur := generations[i]
 
 		// See if this generation is orphan'd which would prevent it from being further
-		// compacted until a final full compactin runs.
+		// compacted until a final full compaction runs.
 		if i < len(generations)-1 {
 			if cur.level() < generations[i+1].level() {
 				currentGen = append(currentGen, cur)
@@ -356,7 +356,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
 		}
 
 		// See if this generation is orphan'd which would prevent it from being further
-		// compacted until a final full compactin runs.
+		// compacted until a final full compaction runs.
 		if i < len(generations)-1 {
 			if cur.level() < generations[i+1].level() {
 				currentGen = append(currentGen, cur)
@@ -2,7 +2,7 @@ package tsm1
 
 /*
 This code is originally from: https://github.com/dgryski/go-tsz and has been modified to remove
-the timestamp compression fuctionality.
+the timestamp compression functionality.
 
 It implements the float compression as presented in: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
 This implementation uses a sentinel value of NaN which means that float64 NaN cannot be stored using
@@ -1905,7 +1905,7 @@ func mustMakeIndex(tb testing.TB, keys, blocks int) *indirectIndex {
 
 	indirect := NewIndirectIndex()
 	if err = indirect.UnmarshalBinary(bytes); err != nil {
-		tb.Fatalf("unexpected error unmarshaling index: %v", err)
+		tb.Fatalf("unexpected error unmarshalling index: %v", err)
 	}
 
 	return indirect
@@ -407,7 +407,7 @@ func CountTimestamps(b []byte) int {
 	case timeCompressedPackedSimple:
 		// First 9 bytes are the starting timestamp and scaling factor, skip over them
 		count, _ := simple8b.CountBytes(b[9:])
-		return count + 1 // +1 is for the first uncompressed timestamp, starting timestamep in b[1:9]
+		return count + 1 // +1 is for the first uncompressed timestamp, starting timestamp in b[1:9]
 	default:
 		return 0
 	}
@@ -467,7 +467,7 @@ type File interface {
 	MeasurementsSketches() (s, t estimator.Sketch, err error)
 	SeriesSketches() (s, t estimator.Sketch, err error)
 
-	// Bitmap series existance.
+	// Bitmap series existence.
 	SeriesIDSet() (*tsdb.SeriesIDSet, error)
 	TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error)
@@ -401,7 +401,7 @@ func ParseSeriesKey(data []byte) (name []byte, tags models.Tags) {
 }
 
 // ParseSeriesKeyInto extracts the name and tags for data, parsing the tags into
-// dstTags, which is then returened.
+// dstTags, which is then returned.
 //
 // The returned dstTags may have a different length and capacity.
 func ParseSeriesKeyInto(data []byte, dstTags models.Tags) ([]byte, models.Tags) {