Merge pull request #14181 from influxdata/tg-fix-metrics-counts

fix(storage): fix counts for level 4+ files
tmgordeeva 2019-06-28 04:16:25 -07:00 committed by GitHub
commit b0396e1cfa
2 changed files with 33 additions and 5 deletions
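
The underlying issue is that a Prometheus gauge's Set replaces the current value rather than accumulating it, and (as the test's expected "4+" label suggests) formatLevel maps every level from 4 upward onto one shared label, so calling Set once per input level leaves only the last level's value on the gauge. Below is a minimal, self-contained sketch of that behavior using client_golang directly; the metric name is invented for the sketch and this is not the tracker's actual code:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// A gauge with the same "level" label the file tracker uses; the metric
	// name is made up for this illustration.
	diskSize := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "example_disk_bytes",
		Help: "Bytes per compaction level.",
	}, []string{"level"})

	// Levels 4 and 5 both format to the "4+" label. One Set per input level
	// means the second call overwrites the first.
	diskSize.With(prometheus.Labels{"level": "4+"}).Set(100) // from level 4
	diskSize.With(prometheus.Labels{"level": "4+"}).Set(100) // from level 5
	fmt.Println(testutil.ToFloat64(diskSize.With(prometheus.Labels{"level": "4+"}))) // 100, not 200

	// The fix sums per formatted label first and calls Set exactly once.
	diskSize.With(prometheus.Labels{"level": "4+"}).Set(100 + 100)
	fmt.Println(testutil.ToFloat64(diskSize.With(prometheus.Labels{"level": "4+"}))) // 200
}

The patch below applies exactly that pre-aggregation in both SetBytes and SetFileCount: fold the per-level inputs into a per-label map, then write each gauge once.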


@@ -332,8 +332,14 @@ func (t *fileTracker) Bytes() uint64 { return atomic.LoadUint64(&t.diskBytes) }
 func (t *fileTracker) SetBytes(bytes map[int]uint64) {
 	total := uint64(0)
 	labels := t.Labels()
+	sizes := make(map[string]uint64)
 	for k, v := range bytes {
-		labels["level"] = formatLevel(uint64(k))
+		label := formatLevel(uint64(k))
+		sizes[label] += v
+		total += v
+	}
+	for k, v := range sizes {
+		labels["level"] = k
 		t.metrics.DiskSize.With(labels).Set(float64(v))
 	}
 	atomic.StoreUint64(&t.diskBytes, total)
@@ -351,8 +357,13 @@ func (t *fileTracker) AddBytes(bytes uint64, level int) {
 // SetFileCount sets the number of files in the FileStore.
 func (t *fileTracker) SetFileCount(files map[int]uint64) {
 	labels := t.Labels()
+	counts := make(map[string]uint64)
 	for k, v := range files {
-		labels["level"] = formatLevel(uint64(k))
+		label := formatLevel(uint64(k))
+		counts[label] += v
+	}
+	for k, v := range counts {
+		labels["level"] = k
 		t.metrics.Files.With(labels).Set(float64(v))
 	}
 }
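
Both SetBytes and SetFileCount now key their pre-aggregation maps on the formatted level label rather than on the numeric level. The diff does not show formatLevel itself; the following is a hypothetical reconstruction consistent with the "4+" label the test expects, and the actual helper in the tracker code may differ:

package main

import "fmt"

// formatLevel here is a hypothetical stand-in for the helper used above:
// compaction levels 4 and higher are assumed to collapse into the single
// "4+" label, which is why distinct input keys (4 and 5) can address the
// same gauge and must be summed before a single Set.
func formatLevel(level uint64) string {
	if level >= 4 {
		return "4+"
	}
	return fmt.Sprintf("%d", level)
}

func main() {
	for _, lvl := range []uint64{0, 1, 4, 5} {
		fmt.Println(lvl, "->", formatLevel(lvl))
	}
}

With that mapping, the test inputs {0: 300, 1: 500, 4: 100, 5: 100} produce the labels "0", "1", "4+", "4+", so the "4+" gauge is expected to end up at 200.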


@@ -13,6 +13,7 @@ func TestMetrics_Filestore(t *testing.T) {
 	t1 := newFileTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"})
 	t2 := newFileTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"})
+	t3 := newFileTracker(metrics, prometheus.Labels{"engine_id": "2", "node_id": "0"})
 
 	reg := prometheus.NewRegistry()
 	reg.MustRegister(metrics.PrometheusCollectors()...)
@@ -22,7 +23,9 @@ func TestMetrics_Filestore(t *testing.T) {
 	t1.SetFileCount(map[int]uint64{0: 3})
 	t2.AddBytes(200, 0)
-	t2.SetFileCount(map[int]uint64{0: 4})
+	t2.SetFileCount(map[int]uint64{0: 4, 4: 3, 5: 1})
+	t3.SetBytes(map[int]uint64{0: 300, 1: 500, 4: 100, 5: 100})
 
 	// Test that all the correct metrics are present.
 	mfs, err := reg.Gather()
@@ -34,7 +37,10 @@ func TestMetrics_Filestore(t *testing.T) {
 	m1Bytes := promtest.MustFindMetric(t, mfs, base+"disk_bytes", prometheus.Labels{"engine_id": "0", "node_id": "0", "level": "0"})
 	m2Bytes := promtest.MustFindMetric(t, mfs, base+"disk_bytes", prometheus.Labels{"engine_id": "1", "node_id": "0", "level": "0"})
 	m1Files := promtest.MustFindMetric(t, mfs, base+"total", prometheus.Labels{"engine_id": "0", "node_id": "0", "level": "0"})
-	m2Files := promtest.MustFindMetric(t, mfs, base+"total", prometheus.Labels{"engine_id": "1", "node_id": "0", "level": "0"})
+	m2Files1 := promtest.MustFindMetric(t, mfs, base+"total", prometheus.Labels{"engine_id": "1", "node_id": "0", "level": "0"})
+	m2Files2 := promtest.MustFindMetric(t, mfs, base+"total", prometheus.Labels{"engine_id": "1", "node_id": "0", "level": "4+"})
+	m3Bytes1 := promtest.MustFindMetric(t, mfs, base+"disk_bytes", prometheus.Labels{"engine_id": "2", "node_id": "0", "level": "0"})
+	m3Bytes2 := promtest.MustFindMetric(t, mfs, base+"disk_bytes", prometheus.Labels{"engine_id": "2", "node_id": "0", "level": "4+"})
 
 	if m, got, exp := m1Bytes, m1Bytes.GetGauge().GetValue(), 100.0; got != exp {
 		t.Errorf("[%s] got %v, expected %v", m, got, exp)
@@ -48,10 +54,21 @@ func TestMetrics_Filestore(t *testing.T) {
 		t.Errorf("[%s] got %v, expected %v", m, got, exp)
 	}
-	if m, got, exp := m2Files, m2Files.GetGauge().GetValue(), 4.0; got != exp {
+	if m, got, exp := m2Files1, m2Files1.GetGauge().GetValue(), 4.0; got != exp {
 		t.Errorf("[%s] got %v, expected %v", m, got, exp)
 	}
+	if m, got, exp := m2Files2, m2Files2.GetGauge().GetValue(), 4.0; got != exp {
+		t.Errorf("[%s] got %v, expected %v", m, got, exp)
+	}
+	if m, got, exp := m3Bytes1, m3Bytes1.GetGauge().GetValue(), 300.0; got != exp {
+		t.Errorf("[%s] got %v, expected %v", m, got, exp)
+	}
+	if m, got, exp := m3Bytes2, m3Bytes2.GetGauge().GetValue(), 200.0; got != exp {
+		t.Errorf("[%s] got %v, expected %v", m, got, exp)
+	}
 }
 
 func TestMetrics_Cache(t *testing.T) {
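
promtest.MustFindMetric is an influxdb-internal test helper; the same kind of check can be written against client_golang and client_model alone. A rough sketch of that lookup pattern under those assumptions (the metric name and the findGauge helper are invented for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// findGauge scans gathered metric families for a gauge whose label set
// matches exactly, in the spirit of promtest.MustFindMetric.
func findGauge(mfs []*dto.MetricFamily, name string, labels map[string]string) (float64, bool) {
	for _, mf := range mfs {
		if mf.GetName() != name {
			continue
		}
		for _, m := range mf.GetMetric() {
			matched := 0
			for _, lp := range m.GetLabel() {
				if labels[lp.GetName()] == lp.GetValue() {
					matched++
				}
			}
			if matched == len(labels) && len(m.GetLabel()) == len(labels) {
				return m.GetGauge().GetValue(), true
			}
		}
	}
	return 0, false
}

func main() {
	reg := prometheus.NewRegistry()
	files := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "example_files_total", // made-up name for the sketch
		Help: "Files per compaction level.",
	}, []string{"level"})
	reg.MustRegister(files)

	files.With(prometheus.Labels{"level": "4+"}).Set(4)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	if v, ok := findGauge(mfs, "example_files_total", map[string]string{"level": "4+"}); ok {
		fmt.Println(v) // 4
	}
}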