chore: remove duplicate word in comments (#23685)
Signed-off-by: Abirdcfly <fp544037857@gmail.com>
pull/23723/head
parent
1c6fbf9b2c
commit
c433342830
|
@ -335,7 +335,7 @@ func (a *args) walkShardDirs(root string, fn func(db, rp, id, path string) error
|
|||
return nil
|
||||
}
|
||||
|
||||
// counter abstracts a a method of counting keys.
|
||||
// counter abstracts a method of counting keys.
|
||||
type counter interface {
|
||||
Add(key []byte)
|
||||
Count() uint64
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
package query
|
||||
|
||||
// linearFloat computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
|
||||
// linearFloat computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
|
||||
// and returns the value of the point on the line with time windowTime
|
||||
// y = mx + b
|
||||
func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {
|
||||
|
@ -10,7 +10,7 @@ func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextVa
|
|||
return m*x + b
|
||||
}
|
||||
|
||||
// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
|
||||
// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
|
||||
// and returns the value of the point on the line with time windowTime
|
||||
// y = mx + b
|
||||
func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {
|
||||
|
@ -20,7 +20,7 @@ func linearInteger(windowTime, previousTime, nextTime int64, previousValue, next
|
|||
return int64(m*x + b)
|
||||
}
|
||||
|
||||
// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
|
||||
// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)
|
||||
// and returns the value of the point on the line with time windowTime
|
||||
// y = mx + b
|
||||
func linearUnsigned(windowTime, previousTime, nextTime int64, previousValue, nextValue uint64) uint64 {
|
||||
|
|
|
@ -880,7 +880,7 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
|
|||
|
||||
// scanTime scans buf, starting at i for the time section of a point. It
|
||||
// returns the ending position and the byte slice of the timestamp within buf
|
||||
// and and error if the timestamp is not in the correct numeric format.
|
||||
// and error if the timestamp is not in the correct numeric format.
|
||||
func scanTime(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
|
|
@ -34,7 +34,7 @@ func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo,
|
|||
return nil
|
||||
}
|
||||
|
||||
// Figure out the the full relative path including any sub-dirs
|
||||
// Figure out the full relative path including any sub-dirs
|
||||
subDir, _ := filepath.Split(path)
|
||||
subDir, err = filepath.Rel(dir, subDir)
|
||||
if err != nil {
|
||||
|
@ -66,7 +66,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write
|
|||
return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw)
|
||||
}
|
||||
|
||||
/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
|
||||
// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
|
||||
// e.g., when we want to write a *.tmp file using the original file's non-tmp name.
|
||||
func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
|
||||
h, err := tar.FileInfoHeader(f, f.Name())
|
||||
|
|
|
@ -23,7 +23,7 @@ The parser will validate all contents of the template and provide any
|
|||
and all fields/entries that failed validation.
|
||||
|
||||
If you wish to use the Template type in your transport layer and let the
|
||||
the transport layer manage the decoding, then you can run the following
|
||||
transport layer manage the decoding, then you can run the following
|
||||
to validate the template after the raw decoding is done:
|
||||
|
||||
if err := template.Validate(); err != nil {
|
||||
|
|
|
@ -114,7 +114,7 @@ func archiveProfilesHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
// Capturing CPU profiles is a little trickier. The preferred way to send the
|
||||
// the cpu profile duration is via the supplied "cpu" variable's value.
|
||||
// cpu profile duration is via the supplied "cpu" variable's value.
|
||||
//
|
||||
// The duration should be encoded as a Go duration that can be parsed by
|
||||
// time.ParseDuration().
|
||||
|
|
|
@ -14,7 +14,7 @@ const ErrSessionNotFound = "session not found"
|
|||
// ErrSessionExpired is the error message for expired sessions.
|
||||
const ErrSessionExpired = "session has expired"
|
||||
|
||||
// RenewSessionTime is the the time to extend session, currently set to 5min.
|
||||
// RenewSessionTime is the time to extend session, currently set to 5min.
|
||||
var RenewSessionTime = time.Duration(time.Second * 300)
|
||||
|
||||
// DefaultSessionLength is the default session length on initial creation.
|
||||
|
|
|
@ -1158,7 +1158,7 @@ var availableInputs = `{
|
|||
"type": "input",
|
||||
"name": "graylog",
|
||||
"description": "Read flattened metrics from one or more GrayLog HTTP endpoints",
|
||||
"config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
|
||||
"config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
|
||||
},
|
||||
{
|
||||
"type": "input",
|
||||
|
|
|
@ -182,7 +182,7 @@ type Cache struct {
|
|||
lastWriteTime time.Time
|
||||
|
||||
// A one time synchronization used to initial the cache with a store. Since the store can allocate a
|
||||
// a large amount memory across shards, we lazily create it.
|
||||
// large amount memory across shards, we lazily create it.
|
||||
initialize atomic.Value
|
||||
initializedCount uint32
|
||||
}
|
||||
|
|
|
@ -1587,7 +1587,7 @@ func (e *Engine) deleteSeriesRange(ctx context.Context, seriesKeys [][]byte, min
|
|||
}
|
||||
|
||||
// The series are deleted on disk, but the index may still say they exist.
|
||||
// Depending on the the min,max time passed in, the series may or not actually
|
||||
// Depending on the min,max time passed in, the series may or not actually
|
||||
// exists now. To reconcile the index, we walk the series keys that still exists
|
||||
// on disk and cross out any keys that match the passed in series. Any series
|
||||
// left in the slice at the end do not exist and can be deleted from the index.
|
||||
|
|
|
@ -296,7 +296,7 @@ func (d *IntegerDecoder) decodePacked() {
|
|||
} else {
|
||||
n, err := simple8b.Decode(&d.values, v)
|
||||
if err != nil {
|
||||
// Should never happen, only error that could be returned is if the the value to be decoded was not
|
||||
// Should never happen, only error that could be returned is if the value to be decoded was not
|
||||
// actually encoded by simple8b encoder.
|
||||
d.err = fmt.Errorf("failed to decode value %v: %v", v, err)
|
||||
}
|
||||
|
|
|
@ -88,7 +88,7 @@ func (e *encoder) reduce() (max, divisor uint64, rle bool, deltas []uint64) {
|
|||
// Starting values for a max and divisor
|
||||
max, divisor = 0, 1e12
|
||||
|
||||
// Indicates whether the the deltas can be run-length encoded
|
||||
// Indicates whether the deltas can be run-length encoded
|
||||
rle = true
|
||||
|
||||
// Iterate in reverse so we can apply deltas in place
|
||||
|
|
|
@ -36,7 +36,7 @@ composed of a sequence of index entries ordered lexicographically by key and
|
|||
then by time. Each index entry starts with a key length and key followed by a
|
||||
count of the number of blocks in the file. Each block entry is composed of
|
||||
the min and max time for the block, the offset into the file where the block
|
||||
is located and the the size of the block.
|
||||
is located and the size of the block.
|
||||
|
||||
The index structure can provide efficient access to all blocks as well as the
|
||||
ability to determine the cost associated with accessing a given key. Given a key
|
||||
|
|
|
@ -57,7 +57,7 @@ func TestIndexFile_TagKeySeriesIDIterator(t *testing.T) {
|
|||
// the key with region=west ends up with a lower series ID than the region=east
|
||||
// series, even though it was written later. When the series id sets for each
|
||||
// tag block in the index file are merged together and iterated, the roaring
|
||||
// bitmap library sorts the series ids, resulting the the series keys being
|
||||
// bitmap library sorts the series ids, resulting the series keys being
|
||||
// emitted in a different order to that which they were written.
|
||||
exp := []string{"cpu,region=west", "cpu,region=east"}
|
||||
var got []string
|
||||
|
|
|
@ -1576,7 +1576,7 @@ func TestMeasurementFieldSet_Corrupt(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("stat error: %v", err)
|
||||
}
|
||||
// Truncate the file to simulate a a corrupted file
|
||||
// Truncate the file to simulate a corrupted file
|
||||
if err := os.Truncate(path, stat.Size()-3); err != nil {
|
||||
t.Fatalf("truncate error: %v", err)
|
||||
}
|
||||
|
|
|
@ -940,7 +940,7 @@ func (s *Store) DeleteRetentionPolicy(database, name string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Remove the retention policy folder from the the WAL.
|
||||
// Remove the retention policy folder from the WAL.
|
||||
if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, database, name)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1147,7 +1147,6 @@ func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (est
|
|||
//
|
||||
// Cardinality is calculated exactly by unioning all shards' bitsets of series
|
||||
// IDs. The result of this method cannot be combined with any other results.
|
||||
//
|
||||
func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) {
|
||||
s.mu.RLock()
|
||||
shards := s.filterShards(byDatabase(database))
|
||||
|
|
|
@ -1324,7 +1324,7 @@ func TestStore_Sketches(t *testing.T) {
|
|||
return fmt.Errorf("[initial|re-open] %v", err)
|
||||
}
|
||||
|
||||
// Delete half the the measurements data
|
||||
// Delete half the measurements data
|
||||
mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
Loading…
Reference in New Issue