influxdb/tsdb/index/tsi1/log_file.go

1338 lines
31 KiB
Go
Raw Normal View History

2016-09-02 14:52:11 +00:00
package tsi1
2016-10-21 15:31:40 +00:00
import (
"bufio"
2016-10-21 15:31:40 +00:00
"bytes"
"encoding/binary"
2016-11-11 16:25:53 +00:00
"errors"
2016-12-15 15:31:18 +00:00
"fmt"
2016-10-21 15:31:40 +00:00
"hash/crc32"
2016-10-25 14:36:58 +00:00
"io"
2016-10-21 15:31:40 +00:00
"os"
"sort"
"sync"
"time"
2016-10-21 15:31:40 +00:00
"github.com/influxdata/influxdb/models"
2017-04-25 16:26:45 +00:00
"github.com/influxdata/influxdb/pkg/bloom"
2016-11-28 22:12:22 +00:00
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/estimator/hll"
2016-10-21 15:31:40 +00:00
"github.com/influxdata/influxdb/pkg/mmap"
"github.com/influxdata/influxdb/tsdb"
2016-11-11 16:25:53 +00:00
)
// Log errors.
var (
	// ErrLogEntryChecksumMismatch is returned when a log entry's CRC32 does
	// not match the checksum stored alongside the entry on disk.
	ErrLogEntryChecksumMismatch = errors.New("log entry checksum mismatch")
)

// Log entry flag constants. Each tombstone kind is a distinct bit.
const (
	LogEntrySeriesTombstoneFlag      = 0x01
	LogEntryMeasurementTombstoneFlag = 0x02
	LogEntryTagKeyTombstoneFlag      = 0x04
	LogEntryTagValueTombstoneFlag    = 0x08
)
// LogFile represents an on-disk write-ahead log file.
type LogFile struct {
mu sync.RWMutex
wg sync.WaitGroup // ref count
id int // file sequence identifier
data []byte // mmap
file *os.File // writer
w *bufio.Writer // buffered writer
buf []byte // marshaling buffer
keyBuf []byte
2017-11-15 23:09:25 +00:00
sfile *tsdb.SeriesFile // series lookup
size int64 // tracks current file size
modTime time.Time // tracks last time write occurred
2016-10-21 15:31:40 +00:00
2016-11-28 22:12:22 +00:00
mSketch, mTSketch estimator.Sketch // Measurement sketches
sSketch, sTSketch estimator.Sketch // Series sketche
2018-01-12 22:05:54 +00:00
// In-memory series existence/tombstone sets.
seriesIDSet, tombstoneSeriesIDSet *tsdb.SeriesIDSet
2016-10-21 15:31:40 +00:00
// In-memory index.
2016-11-11 16:25:53 +00:00
mms logMeasurements
2016-10-21 15:31:40 +00:00
// Filepath to the log file.
2017-02-01 21:19:24 +00:00
path string
2016-10-21 15:31:40 +00:00
}
// NewLogFile returns a new instance of LogFile.
2017-11-15 23:09:25 +00:00
func NewLogFile(sfile *tsdb.SeriesFile, path string) *LogFile {
2016-10-21 15:31:40 +00:00
return &LogFile{
2017-09-14 15:41:58 +00:00
sfile: sfile,
2017-02-06 18:14:13 +00:00
path: path,
2016-11-28 22:12:22 +00:00
mms: make(logMeasurements),
mSketch: hll.NewDefaultPlus(),
mTSketch: hll.NewDefaultPlus(),
sSketch: hll.NewDefaultPlus(),
sTSketch: hll.NewDefaultPlus(),
2018-01-12 22:05:54 +00:00
seriesIDSet: tsdb.NewSeriesIDSet(),
tombstoneSeriesIDSet: tsdb.NewSeriesIDSet(),
2016-10-21 15:31:40 +00:00
}
}
// Open reads the log from a file and validates all the checksums.
func (f *LogFile) Open() error {
if err := f.open(); err != nil {
f.Close()
return err
}
return nil
}
func (f *LogFile) open() error {
2017-04-25 16:26:45 +00:00
f.id, _ = ParseFilename(f.path)
2016-10-21 15:31:40 +00:00
// Open file for appending.
2018-03-05 20:27:48 +00:00
file, err := os.OpenFile(f.Path(), os.O_WRONLY|os.O_CREATE, 0666)
2016-10-21 15:31:40 +00:00
if err != nil {
return err
}
f.file = file
f.w = bufio.NewWriter(f.file)
2016-10-21 15:31:40 +00:00
2016-11-10 15:45:27 +00:00
// Finish opening if file is empty.
fi, err := file.Stat()
if err != nil {
return err
} else if fi.Size() == 0 {
return nil
}
2016-12-15 15:31:18 +00:00
f.size = fi.Size()
f.modTime = fi.ModTime()
2016-11-10 15:45:27 +00:00
2016-10-21 15:31:40 +00:00
// Open a read-only memory map of the existing data.
2017-09-14 15:41:58 +00:00
data, err := mmap.Map(f.Path(), 0)
2016-10-21 15:31:40 +00:00
if err != nil {
return err
}
f.data = data
// Read log entries from mmap.
var n int64
2016-10-21 15:31:40 +00:00
for buf := f.data; len(buf) > 0; {
// Read next entry. Truncate partial writes.
2016-10-21 15:31:40 +00:00
var e LogEntry
2018-03-05 20:27:48 +00:00
if err := e.UnmarshalBinary(buf); err == io.ErrShortBuffer || err == ErrLogEntryChecksumMismatch {
break
} else if err != nil {
2016-10-21 15:31:40 +00:00
return err
}
2016-11-11 16:25:53 +00:00
// Execute entry against in-memory index.
f.execEntry(&e)
2016-10-21 15:31:40 +00:00
// Move buffer forward.
n += int64(e.Size)
2016-10-21 15:31:40 +00:00
buf = buf[e.Size:]
}
2018-03-05 20:27:48 +00:00
// Move to the end of the file.
f.size = n
_, err = file.Seek(n, io.SeekStart)
return err
2016-10-21 15:31:40 +00:00
}
// Close shuts down the file handle and mmap.
func (f *LogFile) Close() error {
2017-01-02 16:29:18 +00:00
// Wait until the file has no more references.
f.wg.Wait()
if f.w != nil {
f.w.Flush()
f.w = nil
}
2016-10-21 15:31:40 +00:00
if f.file != nil {
f.file.Close()
2016-10-25 14:36:58 +00:00
f.file = nil
2016-10-21 15:31:40 +00:00
}
2016-10-21 15:31:40 +00:00
if f.data != nil {
mmap.Unmap(f.data)
}
2016-11-11 16:25:53 +00:00
f.mms = make(logMeasurements)
2016-10-21 15:31:40 +00:00
return nil
}
2017-03-15 17:23:58 +00:00
// Flush flushes buffered data to disk.
func (f *LogFile) Flush() error {
if f.w != nil {
return f.w.Flush()
}
return nil
}
2017-04-25 16:26:45 +00:00
// ID returns the file sequence identifier.
func (f *LogFile) ID() int { return f.id }
2017-02-01 21:19:24 +00:00
// Path returns the file path.
func (f *LogFile) Path() string { return f.path }
// SetPath sets the log file's path.
func (f *LogFile) SetPath(path string) { f.path = path }
2017-04-25 16:26:45 +00:00
// Level returns the log level of the file.
func (f *LogFile) Level() int { return 0 }
// Filter returns the bloom filter for the file.
func (f *LogFile) Filter() *bloom.Filter { return nil }
2016-12-28 19:59:09 +00:00
// Retain adds a reference count to the file.
func (f *LogFile) Retain() { f.wg.Add(1) }
// Release removes a reference count from the file.
func (f *LogFile) Release() { f.wg.Done() }
// Stat returns size and last modification time of the file.
func (f *LogFile) Stat() (int64, time.Time) {
f.mu.RLock()
size, modTime := f.size, f.modTime
f.mu.RUnlock()
return size, modTime
2016-12-15 15:31:18 +00:00
}
2018-01-12 22:05:54 +00:00
// SeriesIDSet returns the series existence set.
func (f *LogFile) SeriesIDSet() (*tsdb.SeriesIDSet, error) {
return f.seriesIDSet, nil
}
// TombstoneSeriesIDSet returns the series tombstone set.
func (f *LogFile) TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) {
return f.tombstoneSeriesIDSet, nil
}
// Size returns the size of the file, in bytes.
func (f *LogFile) Size() int64 {
f.mu.RLock()
v := f.size
f.mu.RUnlock()
return v
}
2016-11-27 16:34:03 +00:00
// Measurement returns a measurement element.
func (f *LogFile) Measurement(name []byte) MeasurementElem {
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
return nil
}
2017-01-12 16:29:40 +00:00
2017-01-02 16:29:18 +00:00
return mm
2016-11-27 16:34:03 +00:00
}
func (f *LogFile) MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) bool {
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
return false
}
for id := range mm.series {
if ss.Contains(id) {
return true
}
}
return false
}
2016-11-11 16:25:53 +00:00
// MeasurementNames returns an ordered list of measurement names.
func (f *LogFile) MeasurementNames() []string {
f.mu.RLock()
defer f.mu.RUnlock()
2016-12-15 15:31:18 +00:00
return f.measurementNames()
}
2016-12-15 15:31:18 +00:00
func (f *LogFile) measurementNames() []string {
2016-11-11 16:25:53 +00:00
a := make([]string, 0, len(f.mms))
for name := range f.mms {
a = append(a, name)
}
2017-02-16 16:39:51 +00:00
sort.Strings(a)
2016-11-11 16:25:53 +00:00
return a
2016-11-08 21:07:01 +00:00
}
2016-10-21 15:31:40 +00:00
// DeleteMeasurement adds a tombstone for a measurement to the log file.
func (f *LogFile) DeleteMeasurement(name []byte) error {
f.mu.Lock()
defer f.mu.Unlock()
2016-11-11 16:25:53 +00:00
e := LogEntry{Flag: LogEntryMeasurementTombstoneFlag, Name: name}
if err := f.appendEntry(&e); err != nil {
2016-10-21 15:31:40 +00:00
return err
}
2016-11-11 16:25:53 +00:00
f.execEntry(&e)
2016-10-21 15:31:40 +00:00
return nil
}
2017-09-14 15:41:58 +00:00
// TagKeySeriesIDIterator returns a series iterator for a tag key.
func (f *LogFile) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
2016-11-27 16:34:03 +00:00
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-11-27 16:34:03 +00:00
}
tk, ok := mm.tagSet[string(key)]
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-11-27 16:34:03 +00:00
}
// Combine iterators across all tag keys.
itrs := make([]tsdb.SeriesIDIterator, 0, len(tk.tagValues))
2016-11-27 16:34:03 +00:00
for _, tv := range tk.tagValues {
2016-12-15 15:31:18 +00:00
if len(tv.series) == 0 {
continue
}
2017-09-14 15:41:58 +00:00
itrs = append(itrs, newLogSeriesIDIterator(tv.series))
2016-11-27 16:34:03 +00:00
}
2016-12-15 15:31:18 +00:00
2017-11-29 18:20:18 +00:00
return tsdb.MergeSeriesIDIterators(itrs...)
2016-11-27 16:34:03 +00:00
}
2016-11-29 18:09:33 +00:00
// TagKeyIterator returns a value iterator for a measurement.
func (f *LogFile) TagKeyIterator(name []byte) TagKeyIterator {
2016-11-27 20:15:32 +00:00
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-11-27 20:15:32 +00:00
}
2016-11-29 18:09:33 +00:00
a := make([]logTagKey, 0, len(mm.tagSet))
for _, k := range mm.tagSet {
a = append(a, k)
}
return newLogTagKeyIterator(a)
}
// TagKey returns a tag key element.
func (f *LogFile) TagKey(name, key []byte) TagKeyElem {
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
return nil
}
tk, ok := mm.tagSet[string(key)]
if !ok {
return nil
}
return &tk
}
2016-12-05 17:51:06 +00:00
// TagValue returns a tag value element.
func (f *LogFile) TagValue(name, key, value []byte) TagValueElem {
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
return nil
}
tk, ok := mm.tagSet[string(key)]
if !ok {
return nil
}
tv, ok := tk.tagValues[string(value)]
if !ok {
return nil
}
return &tv
}
2016-11-29 18:09:33 +00:00
// TagValueIterator returns a value iterator for a tag key.
func (f *LogFile) TagValueIterator(name, key []byte) TagValueIterator {
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
2016-11-27 20:15:32 +00:00
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-11-27 20:15:32 +00:00
}
2016-11-29 18:09:33 +00:00
tk, ok := mm.tagSet[string(key)]
if !ok {
return nil
2016-11-27 20:15:32 +00:00
}
2016-11-29 18:09:33 +00:00
return tk.TagValueIterator()
2016-11-27 20:15:32 +00:00
}
2016-10-21 15:31:40 +00:00
// DeleteTagKey adds a tombstone for a tag key to the log file.
func (f *LogFile) DeleteTagKey(name, key []byte) error {
f.mu.Lock()
defer f.mu.Unlock()
2017-09-14 15:41:58 +00:00
e := LogEntry{Flag: LogEntryTagKeyTombstoneFlag, Name: name, Key: key}
2016-11-11 16:25:53 +00:00
if err := f.appendEntry(&e); err != nil {
return err
}
f.execEntry(&e)
return nil
2016-10-21 15:31:40 +00:00
}
2017-09-14 15:41:58 +00:00
// TagValueSeriesIDIterator returns a series iterator for a tag value.
func (f *LogFile) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
2016-11-27 16:34:03 +00:00
f.mu.RLock()
defer f.mu.RUnlock()
mm, ok := f.mms[string(name)]
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-11-27 16:34:03 +00:00
}
tk, ok := mm.tagSet[string(key)]
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-11-27 16:34:03 +00:00
}
tv, ok := tk.tagValues[string(value)]
if !ok {
2016-11-29 18:09:33 +00:00
return nil
2016-12-15 15:31:18 +00:00
} else if len(tv.series) == 0 {
return nil
2016-11-27 16:34:03 +00:00
}
2016-12-15 15:31:18 +00:00
2017-09-14 15:41:58 +00:00
return newLogSeriesIDIterator(tv.series)
2016-11-27 16:34:03 +00:00
}
2017-05-05 21:06:07 +00:00
// MeasurementN returns the total number of measurements.
func (f *LogFile) MeasurementN() (n uint64) {
f.mu.RLock()
defer f.mu.RUnlock()
return uint64(len(f.mms))
}
// TagKeyN returns the total number of keys.
func (f *LogFile) TagKeyN() (n uint64) {
f.mu.RLock()
defer f.mu.RUnlock()
for _, mm := range f.mms {
n += uint64(len(mm.tagSet))
}
return n
}
// TagValueN returns the total number of values.
func (f *LogFile) TagValueN() (n uint64) {
f.mu.RLock()
defer f.mu.RUnlock()
for _, mm := range f.mms {
for _, k := range mm.tagSet {
n += uint64(len(k.tagValues))
}
}
return n
}
2016-10-21 15:31:40 +00:00
// DeleteTagValue adds a tombstone for a tag value to the log file.
func (f *LogFile) DeleteTagValue(name, key, value []byte) error {
f.mu.Lock()
defer f.mu.Unlock()
2017-09-14 15:41:58 +00:00
e := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Key: key, Value: value}
2016-11-11 16:25:53 +00:00
if err := f.appendEntry(&e); err != nil {
return err
}
f.execEntry(&e)
return nil
}
// AddSeriesList adds a list of series to the log file in bulk.
2018-01-08 11:39:29 +00:00
func (f *LogFile) AddSeriesList(seriesSet *tsdb.SeriesIDSet, names [][]byte, tagsSlice []models.Tags) error {
2017-09-14 15:41:58 +00:00
buf := make([]byte, 2048)
2017-08-29 15:31:57 +00:00
seriesIDs, err := f.sfile.CreateSeriesListIfNotExists(names, tagsSlice, buf[:0])
if err != nil {
return err
}
2018-01-03 19:19:02 +00:00
var writeRequired bool
entries := make([]LogEntry, 0, len(names))
seriesSet.RLock()
2017-08-29 15:31:57 +00:00
for i := range names {
if seriesSet.ContainsNoLock(seriesIDs[i]) {
// We don't need to allocate anything for this series.
continue
}
2018-01-03 19:19:02 +00:00
writeRequired = true
entries = append(entries, LogEntry{SeriesID: seriesIDs[i], name: names[i], tags: tagsSlice[i], cached: true})
2018-01-03 19:19:02 +00:00
}
seriesSet.RUnlock()
// Exit if all series already exist.
if !writeRequired {
return nil
2017-08-29 15:31:57 +00:00
}
f.mu.Lock()
defer f.mu.Unlock()
2018-01-03 19:19:02 +00:00
seriesSet.Lock()
defer seriesSet.Unlock()
2017-08-29 15:31:57 +00:00
for i := range entries {
2018-01-03 19:19:02 +00:00
entry := &entries[i]
if seriesSet.ContainsNoLock(entry.SeriesID) {
2018-01-03 19:19:02 +00:00
// We don't need to allocate anything for this series.
continue
}
2018-01-03 19:19:02 +00:00
if err := f.appendEntry(entry); err != nil {
return err
}
2018-01-03 19:19:02 +00:00
f.execEntry(entry)
seriesSet.AddNoLock(entry.SeriesID)
}
return nil
}
2018-01-12 22:05:54 +00:00
// DeleteSeriesID adds a tombstone for a series id.
func (f *LogFile) DeleteSeriesID(id uint64) error {
f.mu.Lock()
defer f.mu.Unlock()
e := LogEntry{Flag: LogEntrySeriesTombstoneFlag, SeriesID: id}
if err := f.appendEntry(&e); err != nil {
return err
}
f.execEntry(&e)
return nil
}
2016-11-11 16:25:53 +00:00
// SeriesN returns the total number of series in the file.
func (f *LogFile) SeriesN() (n uint64) {
f.mu.RLock()
defer f.mu.RUnlock()
2016-11-11 16:25:53 +00:00
for _, mm := range f.mms {
n += uint64(len(mm.series))
}
return n
2016-10-21 15:31:40 +00:00
}
2016-11-11 16:25:53 +00:00
// appendEntry adds a log entry to the end of the file.
func (f *LogFile) appendEntry(e *LogEntry) error {
// Marshal entry to the local buffer.
f.buf = appendLogEntry(f.buf[:0], e)
2016-10-21 15:31:40 +00:00
2016-11-11 16:25:53 +00:00
// Save the size of the record.
e.Size = len(f.buf)
2016-10-21 15:31:40 +00:00
2016-11-11 16:25:53 +00:00
// Write record to file.
n, err := f.w.Write(f.buf)
2016-12-15 15:31:18 +00:00
if err != nil {
2016-11-11 16:25:53 +00:00
// Move position backwards over partial entry.
// Log should be reopened if seeking cannot be completed.
if n > 0 {
f.w.Reset(f.file)
2018-01-03 17:04:12 +00:00
if _, err := f.file.Seek(int64(-n), io.SeekCurrent); err != nil {
2016-11-11 16:25:53 +00:00
f.Close()
}
}
2016-10-21 15:31:40 +00:00
return err
}
// Update in-memory file size & modification time.
2016-12-15 15:31:18 +00:00
f.size += int64(n)
f.modTime = time.Now()
2016-11-11 16:25:53 +00:00
return nil
}
// execEntry executes a log entry against the in-memory index.
// This is done after appending and on replay of the log.
func (f *LogFile) execEntry(e *LogEntry) {
switch e.Flag {
case LogEntryMeasurementTombstoneFlag:
f.execDeleteMeasurementEntry(e)
case LogEntryTagKeyTombstoneFlag:
f.execDeleteTagKeyEntry(e)
case LogEntryTagValueTombstoneFlag:
f.execDeleteTagValueEntry(e)
default:
f.execSeriesEntry(e)
}
}
func (f *LogFile) execDeleteMeasurementEntry(e *LogEntry) {
2017-02-08 16:00:08 +00:00
mm := f.createMeasurementIfNotExists(e.Name)
2016-11-11 16:25:53 +00:00
mm.deleted = true
2016-11-29 18:09:33 +00:00
mm.tagSet = make(map[string]logTagKey)
2017-10-26 19:55:00 +00:00
mm.series = make(map[uint64]struct{})
2016-11-28 22:12:22 +00:00
// Update measurement tombstone sketch.
f.mTSketch.Add(e.Name)
2016-11-11 16:25:53 +00:00
}
func (f *LogFile) execDeleteTagKeyEntry(e *LogEntry) {
2017-02-08 16:00:08 +00:00
mm := f.createMeasurementIfNotExists(e.Name)
2017-09-14 15:41:58 +00:00
ts := mm.createTagSetIfNotExists(e.Key)
2016-11-11 16:25:53 +00:00
ts.deleted = true
2017-09-14 15:41:58 +00:00
mm.tagSet[string(e.Key)] = ts
2016-11-11 16:25:53 +00:00
}
func (f *LogFile) execDeleteTagValueEntry(e *LogEntry) {
2017-02-08 16:00:08 +00:00
mm := f.createMeasurementIfNotExists(e.Name)
2017-09-14 15:41:58 +00:00
ts := mm.createTagSetIfNotExists(e.Key)
tv := ts.createTagValueIfNotExists(e.Value)
2016-11-11 16:25:53 +00:00
tv.deleted = true
2017-09-14 15:41:58 +00:00
ts.tagValues[string(e.Value)] = tv
mm.tagSet[string(e.Key)] = ts
2016-11-11 16:25:53 +00:00
}
func (f *LogFile) execSeriesEntry(e *LogEntry) {
var seriesKey []byte
if e.cached {
sz := tsdb.SeriesKeySize(e.name, e.tags)
if len(f.keyBuf) < sz {
f.keyBuf = make([]byte, 0, sz)
}
seriesKey = tsdb.AppendSeriesKey(f.keyBuf[:0], e.name, e.tags)
} else {
seriesKey = f.sfile.SeriesKey(e.SeriesID)
}
// Series keys can be removed if the series has been deleted from
// the entire database and the server is restarted. This would cause
// the log to replay its insert but the key cannot be found.
//
// https://github.com/influxdata/influxdb/issues/9444
if seriesKey == nil {
return
}
2017-09-14 15:41:58 +00:00
2018-01-12 22:05:54 +00:00
// Check if deleted.
deleted := e.Flag == LogEntrySeriesTombstoneFlag
2017-09-14 15:41:58 +00:00
// Read key size.
2017-11-15 23:09:25 +00:00
_, remainder := tsdb.ReadSeriesKeyLen(seriesKey)
2017-09-14 15:41:58 +00:00
// Read measurement name.
2017-11-15 23:09:25 +00:00
name, remainder := tsdb.ReadSeriesKeyMeasurement(remainder)
2017-09-18 19:03:47 +00:00
mm := f.createMeasurementIfNotExists(name)
2017-12-07 15:33:47 +00:00
mm.deleted = false
2018-01-12 22:05:54 +00:00
if !deleted {
mm.series[e.SeriesID] = struct{}{}
} else {
delete(mm.series, e.SeriesID)
}
2016-10-21 15:31:40 +00:00
2017-09-14 15:41:58 +00:00
// Read tag count.
2017-11-15 23:09:25 +00:00
tagN, remainder := tsdb.ReadSeriesKeyTagN(remainder)
2016-12-26 17:17:14 +00:00
2016-10-21 15:31:40 +00:00
// Save tags.
2017-09-14 15:41:58 +00:00
var k, v []byte
for i := 0; i < tagN; i++ {
2017-11-15 23:09:25 +00:00
k, v, remainder = tsdb.ReadSeriesKeyTag(remainder)
2017-09-14 15:41:58 +00:00
ts := mm.createTagSetIfNotExists(k)
tv := ts.createTagValueIfNotExists(v)
2016-10-21 15:31:40 +00:00
2018-01-12 22:05:54 +00:00
// Add/remove a reference to the series on the tag value.
if !deleted {
tv.series[e.SeriesID] = struct{}{}
} else {
delete(tv.series, e.SeriesID)
}
2016-10-21 15:31:40 +00:00
2017-09-14 15:41:58 +00:00
ts.tagValues[string(v)] = tv
mm.tagSet[string(k)] = ts
2016-10-21 15:31:40 +00:00
}
2018-01-12 22:05:54 +00:00
// Add/remove from appropriate series id sets.
if !deleted {
f.sSketch.Add(seriesKey) // Add series to sketch - key in series file format.
2018-01-12 22:05:54 +00:00
f.seriesIDSet.Add(e.SeriesID)
f.tombstoneSeriesIDSet.Remove(e.SeriesID)
} else {
f.sTSketch.Add(seriesKey) // Add series to tombstone sketch - key in series file format.
2018-01-12 22:05:54 +00:00
f.seriesIDSet.Remove(e.SeriesID)
f.tombstoneSeriesIDSet.Add(e.SeriesID)
}
2016-10-21 15:31:40 +00:00
}
2017-09-14 15:41:58 +00:00
// SeriesIDIterator returns an iterator over all series in the log file.
func (f *LogFile) SeriesIDIterator() tsdb.SeriesIDIterator {
2016-11-28 16:59:36 +00:00
f.mu.RLock()
defer f.mu.RUnlock()
2017-02-10 18:49:03 +00:00
// Determine total series count across all measurements.
2016-11-28 16:59:36 +00:00
var n int
2017-02-10 18:49:03 +00:00
mSeriesIdx := make([]int, len(f.mms))
mSeries := make([][]tsdb.SeriesIDElem, 0, len(f.mms))
2016-11-28 16:59:36 +00:00
for _, mm := range f.mms {
n += len(mm.series)
a := make([]tsdb.SeriesIDElem, 0, len(mm.series))
2017-10-25 13:29:44 +00:00
for seriesID := range mm.series {
a = append(a, tsdb.SeriesIDElem{SeriesID: seriesID})
}
sort.Sort(tsdb.SeriesIDElems(a))
mSeries = append(mSeries, a)
2016-11-28 16:59:36 +00:00
}
2017-02-10 18:49:03 +00:00
// Combine series across all measurements by merging the already sorted
// series lists.
sBuffer := make([]tsdb.SeriesIDElem, len(f.mms))
series := make([]tsdb.SeriesIDElem, 0, n)
var minElem tsdb.SeriesIDElem
2017-09-17 18:06:37 +00:00
var minElemIdx int
2017-02-10 18:49:03 +00:00
for s := 0; s < cap(series); s++ {
for i := 0; i < len(sBuffer); i++ {
// Are there still serie to pull from this measurement?
2017-09-17 18:06:37 +00:00
if mSeriesIdx[i] < len(mSeries[i]) && sBuffer[i].SeriesID == 0 {
2017-02-10 18:49:03 +00:00
// Fill the buffer slot for this measurement.
2017-09-17 18:06:37 +00:00
sBuffer[i] = mSeries[i][mSeriesIdx[i]]
2017-02-10 18:49:03 +00:00
mSeriesIdx[i]++
}
// Does this measurement have the smallest current serie out of
// all those in the buffer?
2017-09-17 18:06:37 +00:00
if minElem.SeriesID == 0 || (sBuffer[i].SeriesID != 0 && sBuffer[i].SeriesID < minElem.SeriesID) {
minElem, minElemIdx = sBuffer[i], i
2017-02-10 18:49:03 +00:00
}
2016-12-26 17:17:14 +00:00
}
2017-09-17 18:06:37 +00:00
series, minElem.SeriesID, sBuffer[minElemIdx].SeriesID = append(series, minElem), 0, 0
2016-11-28 16:59:36 +00:00
}
2016-12-15 15:31:18 +00:00
if len(series) == 0 {
return nil
}
2017-09-14 15:41:58 +00:00
return &logSeriesIDIterator{series: series}
2016-11-28 16:59:36 +00:00
}
2017-02-08 16:00:08 +00:00
// createMeasurementIfNotExists returns a measurement by name.
func (f *LogFile) createMeasurementIfNotExists(name []byte) *logMeasurement {
2017-01-02 16:29:18 +00:00
mm := f.mms[string(name)]
if mm == nil {
mm = &logMeasurement{
2016-12-26 17:17:14 +00:00
name: name,
tagSet: make(map[string]logTagKey),
2017-10-26 19:55:00 +00:00
series: make(map[uint64]struct{}),
2016-12-26 17:17:14 +00:00
}
2017-01-02 16:29:18 +00:00
f.mms[string(name)] = mm
// Add measurement to sketch.
f.mSketch.Add(name)
2017-01-06 16:31:25 +00:00
}
2016-10-21 15:31:40 +00:00
return mm
}
// MeasurementIterator returns an iterator over all the measurements in the file.
func (f *LogFile) MeasurementIterator() MeasurementIterator {
f.mu.RLock()
defer f.mu.RUnlock()
2016-10-31 14:46:07 +00:00
var itr logMeasurementIterator
2016-10-21 15:31:40 +00:00
for _, mm := range f.mms {
2017-01-02 16:29:18 +00:00
itr.mms = append(itr.mms, *mm)
2016-10-21 15:31:40 +00:00
}
2016-10-31 14:46:07 +00:00
sort.Sort(logMeasurementSlice(itr.mms))
2016-10-21 15:31:40 +00:00
return &itr
}
2017-09-14 15:41:58 +00:00
// MeasurementSeriesIDIterator returns an iterator over all series for a measurement.
func (f *LogFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
f.mu.RLock()
defer f.mu.RUnlock()
2016-11-10 15:45:27 +00:00
mm := f.mms[string(name)]
2017-01-02 16:29:18 +00:00
if mm == nil || len(mm.series) == 0 {
2016-12-15 15:31:18 +00:00
return nil
}
2017-09-14 15:41:58 +00:00
return newLogSeriesIDIterator(mm.series)
2016-11-08 21:07:01 +00:00
}
2017-05-23 19:42:38 +00:00
// CompactTo compacts the log file and writes it to w.
func (f *LogFile) CompactTo(w io.Writer, m, k uint64, cancel <-chan struct{}) (n int64, err error) {
2016-12-27 16:22:15 +00:00
f.mu.RLock()
defer f.mu.RUnlock()
2016-12-15 15:31:18 +00:00
// Check for cancellation.
select {
case <-cancel:
2018-01-29 19:44:27 +00:00
return n, ErrCompactionInterrupted
default:
}
2016-12-26 17:17:14 +00:00
// Wrap in bufferred writer.
bw := bufio.NewWriter(w)
2016-10-25 14:36:58 +00:00
2017-02-08 16:00:08 +00:00
// Setup compaction offset tracking data.
2016-12-26 17:17:14 +00:00
var t IndexFileTrailer
2017-02-08 16:00:08 +00:00
info := newLogFileCompactInfo()
info.cancel = cancel
2016-10-25 14:36:58 +00:00
// Write magic number.
2016-12-26 17:17:14 +00:00
if err := writeTo(bw, []byte(FileSignature), &n); err != nil {
2016-10-25 14:36:58 +00:00
return n, err
}
2017-04-03 17:05:48 +00:00
// Retreve measurement names in order.
names := f.measurementNames()
// Flush buffer & mmap series block.
if err := bw.Flush(); err != nil {
return n, err
}
2016-10-25 14:36:58 +00:00
// Write tagset blocks in measurement order.
2017-02-08 16:00:08 +00:00
if err := f.writeTagsetsTo(bw, names, info, &n); err != nil {
2016-10-25 14:36:58 +00:00
return n, err
}
// Write measurement block.
t.MeasurementBlock.Offset = n
2017-02-08 16:00:08 +00:00
if err := f.writeMeasurementBlockTo(bw, names, info, &n); err != nil {
2016-10-25 14:36:58 +00:00
return n, err
}
t.MeasurementBlock.Size = n - t.MeasurementBlock.Offset
2018-01-12 22:05:54 +00:00
// Write series set.
t.SeriesIDSet.Offset = n
nn, err := f.seriesIDSet.WriteTo(bw)
if n += nn; err != nil {
return n, err
}
t.SeriesIDSet.Size = n - t.SeriesIDSet.Offset
// Write tombstone series set.
t.TombstoneSeriesIDSet.Offset = n
nn, err = f.tombstoneSeriesIDSet.WriteTo(bw)
if n += nn; err != nil {
return n, err
}
t.TombstoneSeriesIDSet.Size = n - t.TombstoneSeriesIDSet.Offset
2018-02-05 18:51:03 +00:00
// Write series sketches. TODO(edd): Implement WriterTo on HLL++.
t.SeriesSketch.Offset = n
data, err := f.sSketch.MarshalBinary()
if err != nil {
return n, err
} else if _, err := bw.Write(data); err != nil {
2018-02-05 18:51:03 +00:00
return n, err
}
t.SeriesSketch.Size = int64(len(data))
n += t.SeriesSketch.Size
t.TombstoneSeriesSketch.Offset = n
if data, err = f.sTSketch.MarshalBinary(); err != nil {
return n, err
} else if _, err := bw.Write(data); err != nil {
2018-02-05 18:51:03 +00:00
return n, err
}
t.TombstoneSeriesSketch.Size = int64(len(data))
n += t.TombstoneSeriesSketch.Size
2016-10-25 14:36:58 +00:00
// Write trailer.
2018-01-12 22:05:54 +00:00
nn, err = t.WriteTo(bw)
2016-10-25 14:36:58 +00:00
n += nn
if err != nil {
return n, err
}
2016-12-26 17:17:14 +00:00
// Flush buffer.
if err := bw.Flush(); err != nil {
return n, err
}
2016-10-25 14:36:58 +00:00
return n, nil
}
2017-09-17 18:06:37 +00:00
func (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error {
2016-10-25 14:36:58 +00:00
for _, name := range names {
2017-02-08 16:00:08 +00:00
if err := f.writeTagsetTo(w, name, info, n); err != nil {
2016-10-25 14:36:58 +00:00
return err
}
}
return nil
}
// writeTagsetTo writes a single tagset to w and saves the tagset offset.
2017-09-17 18:06:37 +00:00
func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactInfo, n *int64) error {
2016-10-25 14:36:58 +00:00
mm := f.mms[name]
// Check for cancellation.
select {
case <-info.cancel:
2018-01-29 19:44:27 +00:00
return ErrCompactionInterrupted
default:
}
2017-03-10 17:08:16 +00:00
enc := NewTagBlockEncoder(w)
var valueN int
2017-03-10 17:08:16 +00:00
for _, k := range mm.keys() {
tag := mm.tagSet[k]
// Encode tag. Skip values if tag is deleted.
if err := enc.EncodeKey(tag.name, tag.deleted); err != nil {
return err
} else if tag.deleted {
2016-10-25 14:36:58 +00:00
continue
}
// Sort tag values.
values := make([]string, 0, len(tag.tagValues))
for v := range tag.tagValues {
values = append(values, v)
}
sort.Strings(values)
2016-10-25 14:36:58 +00:00
// Add each value.
for _, v := range values {
value := tag.tagValues[v]
2017-09-14 15:41:58 +00:00
if err := enc.EncodeValue(value.name, value.deleted, value.seriesIDs()); err != nil {
2017-03-10 17:08:16 +00:00
return err
}
// Check for cancellation periodically.
if valueN++; valueN%1000 == 0 {
select {
case <-info.cancel:
2018-01-29 19:44:27 +00:00
return ErrCompactionInterrupted
default:
}
}
2016-10-25 14:36:58 +00:00
}
}
// Save tagset offset to measurement.
2017-09-14 15:41:58 +00:00
offset := *n
2016-10-25 14:36:58 +00:00
2017-03-10 17:08:16 +00:00
// Flush tag block.
err := enc.Close()
*n += enc.N()
2016-10-25 14:36:58 +00:00
if err != nil {
return err
}
// Save tagset offset to measurement.
2017-09-17 18:06:37 +00:00
size := *n - offset
2017-09-14 15:41:58 +00:00
info.mms[name] = &logFileMeasurementCompactInfo{offset: offset, size: size}
2016-10-25 14:36:58 +00:00
return nil
}
2017-02-08 16:00:08 +00:00
func (f *LogFile) writeMeasurementBlockTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error {
2016-10-25 14:36:58 +00:00
mw := NewMeasurementBlockWriter()
// Check for cancellation.
select {
case <-info.cancel:
2018-01-29 19:44:27 +00:00
return ErrCompactionInterrupted
default:
}
2016-10-25 14:36:58 +00:00
// Add measurement data.
2017-01-09 17:10:12 +00:00
for _, name := range names {
mm := f.mms[name]
2017-02-08 16:00:08 +00:00
mmInfo := info.mms[name]
assert(mmInfo != nil, "measurement info not found")
2017-09-14 15:41:58 +00:00
mw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mm.seriesIDs())
2016-10-25 14:36:58 +00:00
}
2017-02-01 13:43:37 +00:00
// Flush data to writer.
2016-10-25 14:36:58 +00:00
nn, err := mw.WriteTo(w)
*n += nn
2017-02-01 13:43:37 +00:00
return err
2016-10-25 14:36:58 +00:00
}
// logFileCompactInfo is a context object to track compaction position info.
type logFileCompactInfo struct {
	cancel <-chan struct{} // signals compaction interruption
	mms    map[string]*logFileMeasurementCompactInfo
}

// newLogFileCompactInfo returns a new instance of logFileCompactInfo.
func newLogFileCompactInfo() *logFileCompactInfo {
	return &logFileCompactInfo{
		mms: make(map[string]*logFileMeasurementCompactInfo),
	}
}

// logFileMeasurementCompactInfo records the tagset offset and size written
// for a single measurement during compaction.
type logFileMeasurementCompactInfo struct {
	offset int64
	size   int64
}
2017-02-01 15:33:30 +00:00
// MergeMeasurementsSketches merges the measurement sketches belonging to this
// LogFile into the provided sketches.
//
// MergeMeasurementsSketches is safe for concurrent use by multiple goroutines.
func (f *LogFile) MergeMeasurementsSketches(sketch, tsketch estimator.Sketch) error {
f.mu.RLock()
defer f.mu.RUnlock()
if err := sketch.Merge(f.mSketch); err != nil {
2016-12-20 15:58:06 +00:00
return err
}
2017-02-01 15:33:30 +00:00
return tsketch.Merge(f.mTSketch)
2016-12-20 15:58:06 +00:00
}
// MergeSeriesSketches merges the series sketches belonging to this
// LogFile into the provided sketches.
//
// MergeSeriesSketches is safe for concurrent use by multiple goroutines.
func (f *LogFile) MergeSeriesSketches(sketch, tsketch estimator.Sketch) error {
f.mu.RLock()
defer f.mu.RUnlock()
if err := sketch.Merge(f.sSketch); err != nil {
return err
}
return tsketch.Merge(f.sTSketch)
}
2016-10-21 15:31:40 +00:00
// LogEntry represents a single log entry in the write-ahead log.
type LogEntry struct {
2017-09-14 15:41:58 +00:00
Flag byte // flag
2017-09-26 13:40:26 +00:00
SeriesID uint64 // series id
2017-09-14 15:41:58 +00:00
Name []byte // measurement name
Key []byte // tag key
Value []byte // tag value
Checksum uint32 // checksum of flag/name/tags.
Size int // total size of record, in bytes.
cached bool // Hint to LogFile that series data is already parsed
name []byte // series naem, this is a cached copy of the parsed measurement name
tags models.Tags // series tags, this is a cached copied of the parsed tags
2016-10-21 15:31:40 +00:00
}
// UnmarshalBinary unmarshals data into e.
func (e *LogEntry) UnmarshalBinary(data []byte) error {
var sz uint64
var n int
2016-11-11 16:25:53 +00:00
orig := data
2016-10-21 15:31:40 +00:00
start := len(data)
// Parse flag data.
if len(data) < 1 {
return io.ErrShortBuffer
}
2016-10-21 15:31:40 +00:00
e.Flag, data = data[0], data[1:]
2017-09-14 15:41:58 +00:00
// Parse series id.
if len(data) < 1 {
return io.ErrShortBuffer
}
seriesID, n := binary.Uvarint(data)
2017-09-26 13:40:26 +00:00
e.SeriesID, data = uint64(seriesID), data[n:]
2017-09-14 15:41:58 +00:00
// Parse name length.
if len(data) < 1 {
return io.ErrShortBuffer
} else if sz, n = binary.Uvarint(data); n == 0 {
return io.ErrShortBuffer
}
// Read name data.
if len(data) < n+int(sz) {
return io.ErrShortBuffer
}
2016-10-21 15:31:40 +00:00
e.Name, data = data[n:n+int(sz)], data[n+int(sz):]
2017-09-14 15:41:58 +00:00
// Parse key length.
if len(data) < 1 {
return io.ErrShortBuffer
} else if sz, n = binary.Uvarint(data); n == 0 {
return io.ErrShortBuffer
}
2016-10-21 15:31:40 +00:00
2017-09-14 15:41:58 +00:00
// Read key data.
if len(data) < n+int(sz) {
return io.ErrShortBuffer
}
e.Key, data = data[n:n+int(sz)], data[n+int(sz):]
2016-10-21 15:31:40 +00:00
2017-09-14 15:41:58 +00:00
// Parse value length.
if len(data) < 1 {
return io.ErrShortBuffer
} else if sz, n = binary.Uvarint(data); n == 0 {
return io.ErrShortBuffer
2017-09-14 15:41:58 +00:00
}
2017-09-14 15:41:58 +00:00
// Read value data.
if len(data) < n+int(sz) {
return io.ErrShortBuffer
2016-10-21 15:31:40 +00:00
}
2017-09-14 15:41:58 +00:00
e.Value, data = data[n:n+int(sz)], data[n+int(sz):]
2016-10-21 15:31:40 +00:00
2016-11-11 16:25:53 +00:00
// Compute checksum.
chk := crc32.ChecksumIEEE(orig[:start-len(data)])
2016-10-21 15:31:40 +00:00
// Parse checksum.
if len(data) < 4 {
return io.ErrShortBuffer
}
2016-10-21 15:31:40 +00:00
e.Checksum, data = binary.BigEndian.Uint32(data[:4]), data[4:]
2016-11-11 16:25:53 +00:00
// Verify checksum.
if chk != e.Checksum {
return ErrLogEntryChecksumMismatch
}
2016-10-21 15:31:40 +00:00
// Save length of elem.
e.Size = start - len(data)
return nil
}
// appendLogEntry appends to dst and returns the new buffer.
// This updates the checksum on the entry.
func appendLogEntry(dst []byte, e *LogEntry) []byte {
var buf [binary.MaxVarintLen64]byte
start := len(dst)
// Append flag.
dst = append(dst, e.Flag)
2017-09-17 18:06:37 +00:00
// Append series id.
n := binary.PutUvarint(buf[:], uint64(e.SeriesID))
dst = append(dst, buf[:n]...)
2016-10-21 15:31:40 +00:00
// Append name.
2017-09-17 18:06:37 +00:00
n = binary.PutUvarint(buf[:], uint64(len(e.Name)))
2016-10-21 15:31:40 +00:00
dst = append(dst, buf[:n]...)
dst = append(dst, e.Name...)
2017-09-17 18:06:37 +00:00
// Append key.
n = binary.PutUvarint(buf[:], uint64(len(e.Key)))
2016-10-21 15:31:40 +00:00
dst = append(dst, buf[:n]...)
2017-09-17 18:06:37 +00:00
dst = append(dst, e.Key...)
2016-10-21 15:31:40 +00:00
2017-09-17 18:06:37 +00:00
// Append value.
n = binary.PutUvarint(buf[:], uint64(len(e.Value)))
dst = append(dst, buf[:n]...)
dst = append(dst, e.Value...)
2016-10-21 15:31:40 +00:00
// Calculate checksum.
e.Checksum = crc32.ChecksumIEEE(dst[start:])
// Append checksum.
binary.BigEndian.PutUint32(buf[:4], e.Checksum)
dst = append(dst, buf[:4]...)
return dst
}
2016-10-25 14:36:58 +00:00
// logMeasurements represents a map of measurement names to measurements.
type logMeasurements map[string]*logMeasurement
// logMeasurement is the in-memory state for a single measurement in the log
// file: its tag keys/values, its series id set, and its tombstone flag.
type logMeasurement struct {
	name    []byte
	tagSet  map[string]logTagKey
	deleted bool
	series  map[uint64]struct{} // set of series ids belonging to this measurement
}
func (mm *logMeasurement) seriesIDs() []uint64 {
a := make([]uint64, 0, len(mm.series))
2017-09-17 18:06:37 +00:00
for seriesID := range mm.series {
a = append(a, seriesID)
}
2017-09-26 13:40:26 +00:00
sort.Sort(uint64Slice(a))
2017-09-17 18:06:37 +00:00
return a
}
2016-11-29 18:09:33 +00:00
// Name returns the measurement name.
func (m *logMeasurement) Name() []byte { return m.name }

// Deleted reports whether the measurement has been tombstoned.
func (m *logMeasurement) Deleted() bool { return m.deleted }
// createTagSetIfNotExists returns the tag key entry for key, constructing a
// new empty one if it is not present.
//
// NOTE(review): a newly constructed entry is NOT inserted into m.tagSet
// here — presumably the caller assigns it back after mutating it; verify
// against call sites.
func (m *logMeasurement) createTagSetIfNotExists(key []byte) logTagKey {
	ts, ok := m.tagSet[string(key)]
	if !ok {
		ts = logTagKey{name: key, tagValues: make(map[string]logTagValue)}
	}
	return ts
}
// keys returns a sorted list of tag keys.
func (m *logMeasurement) keys() []string {
a := make([]string, 0, len(m.tagSet))
for k := range m.tagSet {
a = append(a, k)
}
sort.Strings(a)
return a
}
2016-10-31 14:46:07 +00:00
// logMeasurementSlice is a sortable list of log measurements, ordered by name.
type logMeasurementSlice []logMeasurement

func (a logMeasurementSlice) Len() int      { return len(a) }
func (a logMeasurementSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less compares by measurement name. bytes.Compare only guarantees the sign
// of its result, so test `< 0` rather than `== -1`.
func (a logMeasurementSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) < 0 }
// logMeasurementIterator represents an iterator over a slice of measurements.
type logMeasurementIterator struct {
mms []logMeasurement
}
// Next returns the next element in the iterator.
func (itr *logMeasurementIterator) Next() (e MeasurementElem) {
if len(itr.mms) == 0 {
return nil
}
e, itr.mms = &itr.mms[0], itr.mms[1:]
return e
}
2016-11-29 18:09:33 +00:00
// logTagKey is the in-memory state for a single tag key within a
// measurement: its tag values and its tombstone flag.
type logTagKey struct {
	name      []byte
	deleted   bool
	tagValues map[string]logTagValue
}
// Key returns the tag key name.
func (tk *logTagKey) Key() []byte { return tk.name }

// Deleted reports whether the tag key has been tombstoned.
func (tk *logTagKey) Deleted() bool { return tk.deleted }

// TagValueIterator returns an iterator over the key's tag values.
func (tk *logTagKey) TagValueIterator() TagValueIterator {
	values := make([]logTagValue, 0, len(tk.tagValues))
	for _, tv := range tk.tagValues {
		values = append(values, tv)
	}
	return newLogTagValueIterator(values)
}
// createTagValueIfNotExists returns the tag value entry for value,
// constructing a new empty one if it is not present.
//
// NOTE(review): a newly constructed entry is NOT inserted into tk.tagValues
// here — presumably the caller assigns it back after mutating it; verify
// against call sites.
func (tk *logTagKey) createTagValueIfNotExists(value []byte) logTagValue {
	tv, ok := tk.tagValues[string(value)]
	if !ok {
		tv = logTagValue{name: value, series: make(map[uint64]struct{})}
	}
	return tv
}
// logTagKey is a sortable list of log tag keys.
type logTagKeySlice []logTagKey
func (a logTagKeySlice) Len() int { return len(a) }
func (a logTagKeySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a logTagKeySlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }
2016-10-21 15:31:40 +00:00
// logTagValue is the in-memory state for a single tag value: the set of
// series ids containing it and its tombstone flag.
type logTagValue struct {
	name    []byte
	deleted bool
	series  map[uint64]struct{} // set of series ids containing this tag value
}
func (tv *logTagValue) seriesIDs() []uint64 {
a := make([]uint64, 0, len(tv.series))
2017-09-17 18:06:37 +00:00
for seriesID := range tv.series {
a = append(a, seriesID)
}
2017-09-26 13:40:26 +00:00
sort.Sort(uint64Slice(a))
2017-09-17 18:06:37 +00:00
return a
}
2016-11-29 18:09:33 +00:00
// Value returns the tag value name.
func (tv *logTagValue) Value() []byte { return tv.name }

// Deleted reports whether the tag value has been tombstoned.
func (tv *logTagValue) Deleted() bool { return tv.deleted }
// logTagValue is a sortable list of log tag values.
type logTagValueSlice []logTagValue
func (a logTagValueSlice) Len() int { return len(a) }
func (a logTagValueSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a logTagValueSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }
2016-11-29 18:09:33 +00:00
// logTagKeyIterator represents an iterator over a slice of tag keys.
type logTagKeyIterator struct {
a []logTagKey
}
// newLogTagKeyIterator returns a new instance of logTagKeyIterator.
func newLogTagKeyIterator(a []logTagKey) *logTagKeyIterator {
sort.Sort(logTagKeySlice(a))
return &logTagKeyIterator{a: a}
}
// Next returns the next element in the iterator.
func (itr *logTagKeyIterator) Next() (e TagKeyElem) {
if len(itr.a) == 0 {
return nil
}
e, itr.a = &itr.a[0], itr.a[1:]
return e
}
2016-11-27 20:15:32 +00:00
// logTagValueIterator represents an iterator over a slice of tag values.
type logTagValueIterator struct {
a []logTagValue
}
// newLogTagValueIterator returns a new instance of logTagValueIterator.
func newLogTagValueIterator(a []logTagValue) *logTagValueIterator {
sort.Sort(logTagValueSlice(a))
return &logTagValueIterator{a: a}
}
// Next returns the next element in the iterator.
func (itr *logTagValueIterator) Next() (e TagValueElem) {
if len(itr.a) == 0 {
return nil
}
e, itr.a = &itr.a[0], itr.a[1:]
return e
}
2017-09-14 15:41:58 +00:00
// logSeriesIDIterator represents an iterator over a slice of series.
type logSeriesIDIterator struct {
	series []tsdb.SeriesIDElem
}
// newLogSeriesIDIterator returns a new instance of logSeriesIDIterator.
2016-11-27 16:34:03 +00:00
// All series are copied to the iterator.
2017-10-26 19:55:00 +00:00
func newLogSeriesIDIterator(m map[uint64]struct{}) *logSeriesIDIterator {
if len(m) == 0 {
2016-12-15 15:31:18 +00:00
return nil
}
itr := logSeriesIDIterator{series: make([]tsdb.SeriesIDElem, 0, len(m))}
2017-10-26 19:55:00 +00:00
for seriesID := range m {
itr.series = append(itr.series, tsdb.SeriesIDElem{SeriesID: seriesID})
2016-12-26 17:17:14 +00:00
}
sort.Sort(tsdb.SeriesIDElems(itr.series))
2016-11-27 16:34:03 +00:00
return &itr
}
2017-11-29 18:20:18 +00:00
func (itr *logSeriesIDIterator) Close() error { return nil }
2016-11-10 15:45:27 +00:00
// Next returns the next element in the iterator.
2017-11-29 18:20:18 +00:00
func (itr *logSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
2016-11-10 15:45:27 +00:00
if len(itr.series) == 0 {
2017-11-29 18:20:18 +00:00
return tsdb.SeriesIDElem{}, nil
2016-11-10 15:45:27 +00:00
}
2017-09-14 15:41:58 +00:00
elem := itr.series[0]
itr.series = itr.series[1:]
2017-11-29 18:20:18 +00:00
return elem, nil
2016-11-10 15:45:27 +00:00
}
2016-12-15 15:31:18 +00:00
// FormatLogFileName generates a log filename for the given index.
2017-04-25 16:26:45 +00:00
func FormatLogFileName(id int) string {
return fmt.Sprintf("L0-%08d%s", id, LogFileExt)
2016-12-15 15:31:18 +00:00
}