chore: remove duplicate word in comments (#23685)

Signed-off-by: Abirdcfly <fp544037857@gmail.com>

pull/23723/head
Abirdcfly 2022-09-10 04:40:21 +08:00 committed by Jonathan A. Sternberg
parent 1c6fbf9b2c
commit c433342830
17 changed files with 20 additions and 21 deletions

View File

@@ -335,7 +335,7 @@ func (a *args) walkShardDirs(root string, fn func(db, rp, id, path string) error
 	return nil
 }
-// counter abstracts a a method of counting keys.
+// counter abstracts a method of counting keys.
 type counter interface {
 	Add(key []byte)
 	Count() uint64
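
The counter interface in that hunk is small enough that an exact implementation fits in a few lines. A hedged sketch, not taken from influx_inspect itself (the real tool may also use cardinality estimators); the map-backed type is illustrative:

type exactCounter struct {
	keys map[string]struct{}
}

func newExactCounter() *exactCounter {
	return &exactCounter{keys: make(map[string]struct{})}
}

// Add records a key; duplicate keys collapse into one map entry.
func (c *exactCounter) Add(key []byte) { c.keys[string(key)] = struct{}{} }

// Count reports the number of distinct keys seen so far.
func (c *exactCounter) Count() uint64 { return uint64(len(c.keys)) }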

View File

@@ -1,6 +1,6 @@
 package query
-// linearFloat computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// linearFloat computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
 // and returns the value of the point on the line with time windowTime
 // y = mx + b
 func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
@@ -10,7 +10,7 @@ func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextVa
 	return m*x + b
 }
-// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
 // and returns the value of the point on the line with time windowTime
 // y = mx + b
 func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {
@@ -20,7 +20,7 @@ func linearInteger(windowTime, previousTime, nextTime int64, previousValue, next
 	return int64(m*x + b)
 }
-// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
+// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
 // and returns the value of the point on the line with time windowTime
 // y = mx + b
 func linearUnsigned(windowTime, previousTime, nextTime int64, previousValue, nextValue uint64) uint64 {
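
For context on the three functions touched in this file: each evaluates the two-point line form y = mx + b at windowTime. A standalone sketch using the float64 signature shown above; the body is a reconstruction consistent with the comments, not copied from the source:

package main

import "fmt"

func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
	m := (nextValue - previousValue) / float64(nextTime-previousTime) // slope between the two points
	x := float64(windowTime - previousTime)                           // horizontal distance from the previous point
	b := previousValue                                                // line value at previousTime
	return m*x + b
}

func main() {
	// Halfway between (t=0, 10) and (t=10, 20), the line passes through 15.
	fmt.Println(linearFloat(5, 0, 10, 10, 20)) // 15
}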

View File

@@ -880,7 +880,7 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
 // scanTime scans buf, starting at i for the time section of a point. It
 // returns the ending position and the byte slice of the timestamp within buf
-// and and error if the timestamp is not in the correct numeric format.
+// and error if the timestamp is not in the correct numeric format.
 func scanTime(buf []byte, i int) (int, []byte, error) {
 	start := skipWhitespace(buf, i)
 	i = start

View File

@@ -34,7 +34,7 @@ func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo,
 		return nil
 	}
-	// Figure out the the full relative path including any sub-dirs
+	// Figure out the full relative path including any sub-dirs
 	subDir, _ := filepath.Split(path)
 	subDir, err = filepath.Rel(dir, subDir)
 	if err != nil {
@@ -66,7 +66,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write
 	return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw)
 }
-/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
+// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
 // e.g., when we want to write a *.tmp file using the original file's non-tmp name.
 func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
 	h, err := tar.FileInfoHeader(f, f.Name())
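
The rename-on-write idea in StreamRenameFile's comment, sketched end to end; only the signature and the tar.FileInfoHeader call come from the hunk, the rest is an assumed but idiomatic completion:

package tarstream

import (
	"archive/tar"
	"io"
	"os"
	"path/filepath"
)

func streamRenamed(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
	h, err := tar.FileInfoHeader(f, f.Name())
	if err != nil {
		return err
	}
	// Record the entry under the substitute name, e.g. a *.tmp file's non-tmp name.
	h.Name = filepath.ToSlash(filepath.Join(relativePath, tarHeaderFileName))
	if err := tw.WriteHeader(h); err != nil {
		return err
	}
	// The bytes still come from the file's real on-disk path.
	fd, err := os.Open(fullPath)
	if err != nil {
		return err
	}
	defer fd.Close()
	_, err = io.Copy(tw, fd)
	return err
}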

View File

@@ -23,7 +23,7 @@ The parser will validate all contents of the template and provide any
 and all fields/entries that failed validation.
 If you wish to use the Template type in your transport layer and let the
-the transport layer manage the decoding, then you can run the following
+transport layer manage the decoding, then you can run the following
 to validate the template after the raw decoding is done:
 	if err := template.Validate(); err != nil {
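
The decode-then-validate split this doc describes looks roughly like the following; the JSON transport and function shape are assumptions, and only the template.Validate() call is taken from the doc:

import (
	"encoding/json"
	"io"
)

// Template is the pkger type the doc refers to.
func decodeTemplate(r io.Reader) (*Template, error) {
	var t Template
	// The transport layer owns the raw decoding...
	if err := json.NewDecoder(r).Decode(&t); err != nil {
		return nil, err
	}
	// ...and validation runs as a separate, explicit step afterwards,
	// surfacing every field/entry that failed.
	if err := t.Validate(); err != nil {
		return nil, err
	}
	return &t, nil
}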

View File

@@ -114,7 +114,7 @@ func archiveProfilesHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	// Capturing CPU profiles is a little trickier. The preferred way to send the
-	// the cpu profile duration is via the supplied "cpu" variable's value.
+	// cpu profile duration is via the supplied "cpu" variable's value.
 	//
 	// The duration should be encoded as a Go duration that can be parsed by
 	// time.ParseDuration().
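
Handling the "cpu" value the way the comment describes reduces to time.ParseDuration on a request parameter. A hedged sketch; the function shape and the fallback duration are illustrative, not the handler's actual behavior:

import (
	"net/url"
	"time"
)

func cpuProfileDuration(q url.Values) (time.Duration, error) {
	v := q.Get("cpu")
	if v == "" {
		return 30 * time.Second, nil // illustrative fallback only
	}
	// Accepts any Go duration string, e.g. "45s" or "2m".
	return time.ParseDuration(v)
}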

View File

@@ -14,7 +14,7 @@ const ErrSessionNotFound = "session not found"
 // ErrSessionExpired is the error message for expired sessions.
 const ErrSessionExpired = "session has expired"
-// RenewSessionTime is the the time to extend session, currently set to 5min.
+// RenewSessionTime is the time to extend session, currently set to 5min.
 var RenewSessionTime = time.Duration(time.Second * 300)
 // DefaultSessionLength is the default session length on initial creation.
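
As an aside, time.Duration(time.Second * 300) already has type time.Duration, so the conversion is redundant; the same 5-minute value reads more directly as:

var RenewSessionTime = 5 * time.Minute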

View File

@@ -1158,7 +1158,7 @@ var availableInputs = `{
 	"type": "input",
 	"name": "graylog",
 	"description": "Read flattened metrics from one or more GrayLog HTTP endpoints",
-	"config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
+	"config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
 },
 {
 	"type": "input",

View File

@@ -182,7 +182,7 @@ type Cache struct {
 	lastWriteTime time.Time
 	// A one time synchronization used to initial the cache with a store. Since the store can allocate a
-	// a large amount memory across shards, we lazily create it.
+	// large amount memory across shards, we lazily create it.
 	initialize       atomic.Value
 	initializedCount uint32
 }
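
The lazy creation the Cache comment alludes to is once-per-value initialization. A minimal sketch of the pattern using sync.Once; the real field is an atomic.Value, and the store type here is a stand-in:

import "sync"

type store struct{ /* large per-shard allocation */ }

type Cache struct {
	initialize sync.Once
	store      *store
}

// init defers the expensive store allocation until first use, so shards
// that never receive writes never pay for it.
func (c *Cache) init() {
	c.initialize.Do(func() {
		c.store = &store{}
	})
}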

View File

@@ -1587,7 +1587,7 @@ func (e *Engine) deleteSeriesRange(ctx context.Context, seriesKeys [][]byte, min
 	}
 	// The series are deleted on disk, but the index may still say they exist.
-	// Depending on the the min,max time passed in, the series may or not actually
+	// Depending on the min,max time passed in, the series may or not actually
 	// exists now. To reconcile the index, we walk the series keys that still exists
 	// on disk and cross out any keys that match the passed in series. Any series
 	// left in the slice at the end do not exist and can be deleted from the index.
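
The reconciliation walk that comment block outlines is effectively set subtraction: start from the keys that were asked to be deleted, cross out everything still on disk, and whatever remains can be dropped from the index. A hedged sketch with the disk iteration abstracted behind a callback; names and shapes are illustrative:

func reconcile(requested [][]byte, walkDisk func(visit func(key []byte))) [][]byte {
	remaining := make(map[string]struct{}, len(requested))
	for _, k := range requested {
		remaining[string(k)] = struct{}{}
	}
	// Any key still present on disk was not fully deleted; cross it out.
	walkDisk(func(key []byte) {
		delete(remaining, string(key))
	})
	gone := make([][]byte, 0, len(remaining))
	for k := range remaining {
		gone = append(gone, []byte(k))
	}
	return gone // series that no longer exist and can leave the index
}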

View File

@@ -296,7 +296,7 @@ func (d *IntegerDecoder) decodePacked() {
 	} else {
 		n, err := simple8b.Decode(&d.values, v)
 		if err != nil {
-			// Should never happen, only error that could be returned is if the the value to be decoded was not
+			// Should never happen, only error that could be returned is if the value to be decoded was not
 			// actually encoded by simple8b encoder.
 			d.err = fmt.Errorf("failed to decode value %v: %v", v, err)
 		}

View File

@@ -88,7 +88,7 @@ func (e *encoder) reduce() (max, divisor uint64, rle bool, deltas []uint64) {
 	// Starting values for a max and divisor
 	max, divisor = 0, 1e12
-	// Indicates whether the the deltas can be run-length encoded
+	// Indicates whether the deltas can be run-length encoded
 	rle = true
 	// Iterate in reverse so we can apply deltas in place
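
The rle flag tracks a simple property: the deltas are run-length encodable only if they are all equal, so a single counterexample turns it off. A standalone check of that property (the encoder folds this bookkeeping into its reverse pass rather than running a separate loop):

func runLengthEncodable(deltas []uint64) bool {
	for i := 1; i < len(deltas); i++ {
		if deltas[i] != deltas[0] {
			return false // one differing delta rules out RLE
		}
	}
	return true
}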

View File

@@ -36,7 +36,7 @@ composed of a sequence of index entries ordered lexicographically by key and
 then by time. Each index entry starts with a key length and key followed by a
 count of the number of blocks in the file. Each block entry is composed of
 the min and max time for the block, the offset into the file where the block
-is located and the the size of the block.
+is located and the size of the block.
 The index structure can provide efficient access to all blocks as well as the
 ability to determine the cost associated with accessing a given key. Given a key
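
Rendered as Go structs, the layout that paragraph describes looks roughly like this; the field names are illustrative, and the real index is a packed binary encoding rather than structs:

type blockEntry struct {
	MinTime, MaxTime int64  // time range covered by the block
	Offset           int64  // where the block starts in the file
	Size             uint32 // size of the block in bytes
}

type indexEntry struct {
	Key    []byte       // preceded on disk by its length
	Blocks []blockEntry // preceded on disk by a block count
}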

View File

@@ -57,7 +57,7 @@ func TestIndexFile_TagKeySeriesIDIterator(t *testing.T) {
 	// the key with region=west ends up with a lower series ID than the region=east
 	// series, even though it was written later. When the series id sets for each
 	// tag block in the index file are merged together and iterated, the roaring
-	// bitmap library sorts the series ids, resulting the the series keys being
+	// bitmap library sorts the series ids, resulting the series keys being
 	// emitted in a different order to that which they were written.
 	exp := []string{"cpu,region=west", "cpu,region=east"}
 	var got []string
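
The reordering the test comment explains is easy to reproduce with a roaring bitmap directly. This sketch uses github.com/RoaringBitmap/roaring, which may differ from the bitmap package the index actually vendors:

package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	b := roaring.New()
	b.Add(2) // region=east: written first, but holds the higher id
	b.Add(1) // region=west: written later, with the lower id
	it := b.Iterator()
	for it.HasNext() {
		fmt.Println(it.Next()) // prints 1 then 2: sorted by id, not insertion order
	}
}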

View File

@@ -1576,7 +1576,7 @@ func TestMeasurementFieldSet_Corrupt(t *testing.T) {
 	if err != nil {
 		t.Fatalf("stat error: %v", err)
 	}
-	// Truncate the file to simulate a a corrupted file
+	// Truncate the file to simulate a corrupted file
 	if err := os.Truncate(path, stat.Size()-3); err != nil {
 		t.Fatalf("truncate error: %v", err)
 	}

View File

@@ -940,7 +940,7 @@ func (s *Store) DeleteRetentionPolicy(database, name string) error {
 		return err
 	}
-	// Remove the retention policy folder from the the WAL.
+	// Remove the retention policy folder from the WAL.
 	if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, database, name)); err != nil {
 		return err
 	}
@@ -1147,7 +1147,6 @@ func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (est
 //
 // Cardinality is calculated exactly by unioning all shards' bitsets of series
 // IDs. The result of this method cannot be combined with any other results.
-//
 func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) {
 	s.mu.RLock()
 	shards := s.filterShards(byDatabase(database))

View File

@@ -1324,7 +1324,7 @@ func TestStore_Sketches(t *testing.T) {
 		return fmt.Errorf("[initial|re-open] %v", err)
 	}
-	// Delete half the the measurements data
+	// Delete half the measurements data
 	mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil)
 	if err != nil {
 		return err