diff --git a/cmd/influxd/inspect/report_tsm/report_tsm.go b/cmd/influxd/inspect/report_tsm/report_tsm.go
index 9aacf202dd..e6549d41c9 100644
--- a/cmd/influxd/inspect/report_tsm/report_tsm.go
+++ b/cmd/influxd/inspect/report_tsm/report_tsm.go
@@ -335,7 +335,7 @@ func (a *args) walkShardDirs(root string, fn func(db, rp, id, path string) error
 	return nil
 }
 
-// counter abstracts a a method of counting keys.
+// counter abstracts a method of counting keys.
 type counter interface {
 	Add(key []byte)
 	Count() uint64
diff --git a/influxql/query/linear.go b/influxql/query/linear.go
index 0da38f9815..7dc50dac99 100644
--- a/influxql/query/linear.go
+++ b/influxql/query/linear.go
@@ -1,6 +1,6 @@
 package query
 
-// linearFloat computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// linearFloat computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
 // and returns the value of the point on the line with time windowTime
 // y = mx + b
 func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
@@ -10,7 +10,7 @@ func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextVa
 	return m*x + b
 }
 
-// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
 // and returns the value of the point on the line with time windowTime
 // y = mx + b
 func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {
@@ -20,7 +20,7 @@ func linearInteger(windowTime, previousTime, nextTime int64, previousValue, next
 	return int64(m*x + b)
 }
 
-// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// linearUnsigned computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
 // and returns the value of the point on the line with time windowTime
 // y = mx + b
 func linearUnsigned(windowTime, previousTime, nextTime int64, previousValue, nextValue uint64) uint64 {
diff --git a/models/points.go b/models/points.go
index 5468c30f99..bad28d53a7 100644
--- a/models/points.go
+++ b/models/points.go
@@ -880,7 +880,7 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
 
 // scanTime scans buf, starting at i for the time section of a point. It
 // returns the ending position and the byte slice of the timestamp within buf
-// and and error if the timestamp is not in the correct numeric format.
+// and an error if the timestamp is not in the correct numeric format.
 func scanTime(buf []byte, i int) (int, []byte, error) {
 	start := skipWhitespace(buf, i)
 	i = start
diff --git a/pkg/tar/stream.go b/pkg/tar/stream.go
index 44b3fe031d..5b6328dc33 100644
--- a/pkg/tar/stream.go
+++ b/pkg/tar/stream.go
@@ -34,7 +34,7 @@ func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo,
 		return nil
 	}
 
-	// Figure out the the full relative path including any sub-dirs
+	// Figure out the full relative path including any sub-dirs
 	subDir, _ := filepath.Split(path)
 	subDir, err = filepath.Rel(dir, subDir)
 	if err != nil {
@@ -66,7 +66,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write
 	return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw)
 }
 
-/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
+// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
 // e.g., when we want to write a *.tmp file using the original file's non-tmp name.
 func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
 	h, err := tar.FileInfoHeader(f, f.Name())
diff --git a/pkger/doc.go b/pkger/doc.go
index 074244e95b..4737fe9c3d 100644
--- a/pkger/doc.go
+++ b/pkger/doc.go
@@ -23,7 +23,7 @@ The parser will validate all contents of the template and provide any and all
 fields/entries that failed validation.
 
 If you wish to use the Template type in your transport layer and let the
-the transport layer manage the decoding, then you can run the following
+transport layer manage the decoding, then you can run the following
 to validate the template after the raw decoding is done:
 
 	if err := template.Validate(); err != nil {
diff --git a/pprof/http_server.go b/pprof/http_server.go
index f589137e40..09be62cc1e 100644
--- a/pprof/http_server.go
+++ b/pprof/http_server.go
@@ -114,7 +114,7 @@ func archiveProfilesHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Capturing CPU profiles is a little trickier. The preferred way to send the
-	// the cpu profile duration is via the supplied "cpu" variable's value.
+	// cpu profile duration is via the supplied "cpu" variable's value.
 	//
 	// The duration should be encoded as a Go duration that can be parsed by
 	// time.ParseDuration().
diff --git a/session.go b/session.go
index 3c1434bc7d..e506ceb43e 100644
--- a/session.go
+++ b/session.go
@@ -14,7 +14,7 @@ const ErrSessionNotFound = "session not found"
 // ErrSessionExpired is the error message for expired sessions.
 const ErrSessionExpired = "session has expired"
 
-// RenewSessionTime is the the time to extend session, currently set to 5min.
+// RenewSessionTime is the time to extend session, currently set to 5min.
 var RenewSessionTime = time.Duration(time.Second * 300)
 
 // DefaultSessionLength is the default session length on initial creation.
diff --git a/telegraf/plugins/plugins.go b/telegraf/plugins/plugins.go
index 672190222a..647797ba9d 100644
--- a/telegraf/plugins/plugins.go
+++ b/telegraf/plugins/plugins.go
@@ -1158,7 +1158,7 @@ var availableInputs = `{
       "type": "input",
       "name": "graylog",
       "description": "Read flattened metrics from one or more GrayLog HTTP endpoints",
-      "config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
+      "config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
     },
     {
       "type": "input",
diff --git a/tsdb/engine/tsm1/cache.go b/tsdb/engine/tsm1/cache.go
index 8c3a5d3769..d6bb2b9d56 100644
--- a/tsdb/engine/tsm1/cache.go
+++ b/tsdb/engine/tsm1/cache.go
@@ -182,7 +182,7 @@ type Cache struct {
 	lastWriteTime time.Time
 
 	// A one time synchronization used to initial the cache with a store. Since the store can allocate a
-	// a large amount memory across shards, we lazily create it.
+	// large amount of memory across shards, we lazily create it.
 	initialize       atomic.Value
 	initializedCount uint32
 }
diff --git a/tsdb/engine/tsm1/engine.go b/tsdb/engine/tsm1/engine.go
index 0e79081a7c..61332ea30c 100644
--- a/tsdb/engine/tsm1/engine.go
+++ b/tsdb/engine/tsm1/engine.go
@@ -1587,7 +1587,7 @@ func (e *Engine) deleteSeriesRange(ctx context.Context, seriesKeys [][]byte, min
 	}
 
 	// The series are deleted on disk, but the index may still say they exist.
-	// Depending on the the min,max time passed in, the series may or not actually
+	// Depending on the min,max time passed in, the series may or may not actually
 	// exists now. To reconcile the index, we walk the series keys that still exists
 	// on disk and cross out any keys that match the passed in series. Any series
 	// left in the slice at the end do not exist and can be deleted from the index.
diff --git a/tsdb/engine/tsm1/int.go b/tsdb/engine/tsm1/int.go
index e13a26963b..d4f66a1046 100644
--- a/tsdb/engine/tsm1/int.go
+++ b/tsdb/engine/tsm1/int.go
@@ -296,7 +296,7 @@ func (d *IntegerDecoder) decodePacked() {
 	} else {
 		n, err := simple8b.Decode(&d.values, v)
 		if err != nil {
-			// Should never happen, only error that could be returned is if the the value to be decoded was not
+			// Should never happen, only error that could be returned is if the value to be decoded was not
 			// actually encoded by simple8b encoder.
 			d.err = fmt.Errorf("failed to decode value %v: %v", v, err)
 		}
diff --git a/tsdb/engine/tsm1/timestamp.go b/tsdb/engine/tsm1/timestamp.go
index c64d75734c..8856d931e1 100644
--- a/tsdb/engine/tsm1/timestamp.go
+++ b/tsdb/engine/tsm1/timestamp.go
@@ -88,7 +88,7 @@ func (e *encoder) reduce() (max, divisor uint64, rle bool, deltas []uint64) {
 	// Starting values for a max and divisor
 	max, divisor = 0, 1e12
 
-	// Indicates whether the the deltas can be run-length encoded
+	// Indicates whether the deltas can be run-length encoded
 	rle = true
 
 	// Iterate in reverse so we can apply deltas in place
diff --git a/tsdb/engine/tsm1/writer.go b/tsdb/engine/tsm1/writer.go
index e1c6aebcf8..1611e444a1 100644
--- a/tsdb/engine/tsm1/writer.go
+++ b/tsdb/engine/tsm1/writer.go
@@ -36,7 +36,7 @@ composed of a sequence of index entries ordered lexicographically by key and
 then by time. Each index entry starts with a key length and key followed by a
 count of the number of blocks in the file. Each block entry is composed of the
 min and max time for the block, the offset into the file where the block
-is located and the the size of the block.
+is located and the size of the block.
 
 The index structure can provide efficient access to all blocks as well as the
 ability to determine the cost associated with accessing a given key. Given a key
diff --git a/tsdb/index/tsi1/index_file_test.go b/tsdb/index/tsi1/index_file_test.go
index 0f149318a9..87ea5ad210 100644
--- a/tsdb/index/tsi1/index_file_test.go
+++ b/tsdb/index/tsi1/index_file_test.go
@@ -57,7 +57,7 @@ func TestIndexFile_TagKeySeriesIDIterator(t *testing.T) {
 	// the key with region=west ends up with a lower series ID than the region=east
 	// series, even though it was written later. When the series id sets for each
 	// tag block in the index file are merged together and iterated, the roaring
-	// bitmap library sorts the series ids, resulting the the series keys being
+	// bitmap library sorts the series ids, resulting in the series keys being
 	// emitted in a different order to that which they were written.
 	exp := []string{"cpu,region=west", "cpu,region=east"}
 	var got []string
diff --git a/tsdb/shard_test.go b/tsdb/shard_test.go
index b96f91c614..ad1105a4ee 100644
--- a/tsdb/shard_test.go
+++ b/tsdb/shard_test.go
@@ -1576,7 +1576,7 @@ func TestMeasurementFieldSet_Corrupt(t *testing.T) {
 	if err != nil {
 		t.Fatalf("stat error: %v", err)
 	}
-	// Truncate the file to simulate a a corrupted file
+	// Truncate the file to simulate a corrupted file
 	if err := os.Truncate(path, stat.Size()-3); err != nil {
 		t.Fatalf("truncate error: %v", err)
 	}
diff --git a/tsdb/store.go b/tsdb/store.go
index dabebc9c84..d1c616f2f0 100644
--- a/tsdb/store.go
+++ b/tsdb/store.go
@@ -940,7 +940,7 @@ func (s *Store) DeleteRetentionPolicy(database, name string) error {
 		return err
 	}
 
-	// Remove the retention policy folder from the the WAL.
+	// Remove the retention policy folder from the WAL.
 	if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, database, name)); err != nil {
 		return err
 	}
@@ -1147,7 +1147,6 @@ func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (est
 //
 // Cardinality is calculated exactly by unioning all shards' bitsets of series
 // IDs. The result of this method cannot be combined with any other results.
-//
 func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) {
 	s.mu.RLock()
 	shards := s.filterShards(byDatabase(database))
diff --git a/tsdb/store_test.go b/tsdb/store_test.go
index 6d612645db..1043c59eb0 100644
--- a/tsdb/store_test.go
+++ b/tsdb/store_test.go
@@ -1324,7 +1324,7 @@ func TestStore_Sketches(t *testing.T) {
 		return fmt.Errorf("[initial|re-open] %v", err)
 	}
 
-	// Delete half the the measurements data
+	// Delete half the measurements data
 	mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil)
 	if err != nil {
 		return err
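
For reference, the linearFloat/linearInteger/linearUnsigned comments touched in influxql/query/linear.go above all describe the same idea: fit the line y = mx + b through (previousTime, previousValue) and (nextTime, nextValue), then evaluate it at windowTime. The following is a minimal sketch of that formula, not the actual influxql/query implementation; only the signature comes from the hunk context above, and the sample numbers in main are invented for illustration.

	package main

	import "fmt"

	// linearFloat sketches the interpolation described in the comments above:
	// m is the slope between the two points, x is how far windowTime lies past
	// previousTime, and b is the value at previousTime, so y = m*x + b.
	func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
		m := (nextValue - previousValue) / float64(nextTime-previousTime) // slope
		x := float64(windowTime - previousTime)                           // offset from the previous point
		b := previousValue                                                // intercept at previousTime
		return m*x + b
	}

	func main() {
		// Interpolating at t=15 between (10, 1.0) and (20, 3.0):
		// m = 0.2, x = 5, b = 1.0, so the result is 2.0.
		fmt.Println(linearFloat(15, 10, 20, 1.0, 3.0))
	}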