feat: add chunk storage metrics (#2069)
* feat: add chunk storage metrics

* chore: review feedback

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>

parent 8c974beba0
commit 38e375d11a
@@ -1698,13 +1698,13 @@ mod tests {
     fn catalog_chunk_size_bytes_metric_eq(
         reg: &metrics::TestMetricRegistry,
-        source: &'static str,
+        location: &'static str,
         v: u64,
     ) -> Result<(), metrics::Error> {
         reg.has_metric_family("catalog_chunks_mem_usage_bytes")
             .with_labels(&[
                 ("db_name", "placeholder"),
-                ("source", source),
+                ("location", location),
                 ("svr_id", "1"),
             ])
             .gauge()
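For reference, with the label renamed from `source` to `location`, a matching sample of this gauge, assuming a Prometheus-style text rendering (an assumption; the diff itself only exercises the fluent assertion API), would look like:

    catalog_chunks_mem_usage_bytes{db_name="placeholder",location="mutable_buffer",svr_id="1"} 44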
@@ -1714,7 +1714,22 @@ mod tests {
     #[tokio::test]
     async fn metrics_during_rollover() {
         let test_db = make_db().await;
-        let db = test_db.db;
+        let db = Arc::clone(&test_db.db);
+
+        let assert_metric = |name: &'static str, location: &'static str, value: f64| {
+            test_db
+                .metric_registry
+                .has_metric_family(name)
+                .with_labels(&[
+                    ("db_name", "placeholder"),
+                    ("location", location),
+                    ("svr_id", "1"),
+                    ("table", "cpu"),
+                ])
+                .gauge()
+                .eq(value)
+                .unwrap();
+        };

         write_lp(db.as_ref(), "cpu bar=1 10").await;
@@ -1731,11 +1746,18 @@ mod tests {
         .eq(1.0)
         .unwrap();

+        assert_metric("catalog_loaded_chunks", "mutable_buffer", 1.0);
+        assert_metric("catalog_loaded_chunks", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "object_store", 0.0);
+        assert_metric("catalog_loaded_rows", "mutable_buffer", 1.0);
+        assert_metric("catalog_loaded_rows", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "object_store", 0.0);
+
         // verify chunk size updated
         catalog_chunk_size_bytes_metric_eq(&test_db.metric_registry, "mutable_buffer", 44).unwrap();

         // write into same chunk again.
         write_lp(db.as_ref(), "cpu bar=2 10").await;
         write_lp(db.as_ref(), "cpu bar=2 20").await;

         // verify chunk size updated
         catalog_chunk_size_bytes_metric_eq(&test_db.metric_registry, "mutable_buffer", 60).unwrap();
@@ -1753,6 +1775,13 @@ mod tests {
         .eq(1.0)
         .unwrap();

+        assert_metric("catalog_loaded_chunks", "mutable_buffer", 1.0);
+        assert_metric("catalog_loaded_chunks", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "object_store", 0.0);
+        assert_metric("catalog_loaded_rows", "mutable_buffer", 2.0);
+        assert_metric("catalog_loaded_rows", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "object_store", 0.0);
+
         db.rollover_partition("cpu", "1970-01-01T00").await.unwrap();

         // A chunk is now closed
@@ -1768,6 +1797,13 @@ mod tests {
         .eq(1.0)
         .unwrap();

+        assert_metric("catalog_loaded_chunks", "mutable_buffer", 1.0);
+        assert_metric("catalog_loaded_chunks", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "object_store", 0.0);
+        assert_metric("catalog_loaded_rows", "mutable_buffer", 2.0);
+        assert_metric("catalog_loaded_rows", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "object_store", 0.0);
+
         catalog_chunk_size_bytes_metric_eq(&test_db.metric_registry, "mutable_buffer", 1239)
             .unwrap();

@@ -1788,9 +1824,16 @@ mod tests {
         .eq(1.0)
         .unwrap();

+        assert_metric("catalog_loaded_chunks", "mutable_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "read_buffer", 1.0);
+        assert_metric("catalog_loaded_chunks", "object_store", 0.0);
+        assert_metric("catalog_loaded_rows", "mutable_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "read_buffer", 2.0);
+        assert_metric("catalog_loaded_rows", "object_store", 0.0);
+
         // verify chunk size updated (chunk moved from closing to moving to moved)
         catalog_chunk_size_bytes_metric_eq(&test_db.metric_registry, "mutable_buffer", 0).unwrap();
-        let expected_read_buffer_size = 1611;
+        let expected_read_buffer_size = 1613;
         catalog_chunk_size_bytes_metric_eq(
             &test_db.metric_registry,
             "read_buffer",
@@ -1829,11 +1872,18 @@ mod tests {
         // now also in OS
         catalog_chunk_size_bytes_metric_eq(
             &test_db.metric_registry,
-            "parquet",
+            "object_store",
             expected_parquet_size,
         )
         .unwrap(); // TODO: #1311

+        assert_metric("catalog_loaded_chunks", "mutable_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "read_buffer", 1.0);
+        assert_metric("catalog_loaded_chunks", "object_store", 1.0);
+        assert_metric("catalog_loaded_rows", "mutable_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "read_buffer", 2.0);
+        assert_metric("catalog_loaded_rows", "object_store", 2.0);
+
         db.unload_read_buffer("cpu", "1970-01-01T00", 1).unwrap();

         // A chunk is now in the "os-only" state.
@@ -1845,10 +1895,17 @@ mod tests {
         .eq(1.0)
         .unwrap();

+        assert_metric("catalog_loaded_chunks", "mutable_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_chunks", "object_store", 1.0);
+        assert_metric("catalog_loaded_rows", "mutable_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "read_buffer", 0.0);
+        assert_metric("catalog_loaded_rows", "object_store", 2.0);
+
         // verify chunk size not increased for OS (it was in OS before unload)
         catalog_chunk_size_bytes_metric_eq(
             &test_db.metric_registry,
-            "parquet",
+            "object_store",
             expected_parquet_size,
         )
         .unwrap();
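Read off the assertions above, the expected gauge values as the chunk moves through its lifecycle in `metrics_during_rollover`:

    step                          catalog_loaded_chunks        catalog_loaded_rows
                                  (mub / rub / os)             (mub / rub / os)
    after first write             1 / 0 / 0                    1 / 0 / 0
    after further writes          1 / 0 / 0                    2 / 0 / 0
    after rollover (closed)       1 / 0 / 0                    2 / 0 / 0
    after move to read buffer     0 / 1 / 0                    0 / 2 / 0
    after write to object store   0 / 1 / 1                    0 / 2 / 2
    after read buffer unload      0 / 0 / 1                    0 / 0 / 2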
@@ -2845,7 +2902,7 @@ mod tests {

         assert_eq!(db.catalog.metrics().memory().mutable_buffer(), 2398 + 87);
         assert_eq!(db.catalog.metrics().memory().read_buffer(), 2410);
-        assert_eq!(db.catalog.metrics().memory().parquet(), 826);
+        assert_eq!(db.catalog.metrics().memory().object_store(), 826);
     }

     #[tokio::test]
@@ -1,4 +1,4 @@
-use crate::db::catalog::metrics::MemoryMetrics;
+use crate::db::catalog::metrics::StorageGauge;
 use chrono::{DateTime, Utc};
 use data_types::instant::to_approximate_datetime;
 use data_types::{
@@ -228,7 +228,14 @@ macro_rules! unexpected_state {
 pub struct ChunkMetrics {
     pub(super) state: Counter,
     pub(super) immutable_chunk_size: Histogram,
-    pub(super) memory_metrics: MemoryMetrics,
+    /// Chunk storage metrics
+    pub(super) chunk_storage: StorageGauge,
+
+    /// Chunk row count metrics
+    pub(super) row_count: StorageGauge,
+
+    /// Catalog memory metrics
+    pub(super) memory_metrics: StorageGauge,
 }

 impl ChunkMetrics {
@@ -240,7 +247,9 @@ impl ChunkMetrics {
         Self {
             state: Counter::new_unregistered(),
             immutable_chunk_size: Histogram::new_unregistered(),
-            memory_metrics: MemoryMetrics::new_unregistered(),
+            chunk_storage: StorageGauge::new_unregistered(),
+            row_count: StorageGauge::new_unregistered(),
+            memory_metrics: StorageGauge::new_unregistered(),
         }
     }
 }
@@ -275,7 +284,7 @@ impl CatalogChunk {
             time_of_last_write: Some(last_write),
             time_closed: None,
         };
-        chunk.update_memory_metrics();
+        chunk.update_metrics();
         chunk
     }

@@ -312,7 +321,7 @@ impl CatalogChunk {
             time_of_last_write: None,
             time_closed: None,
         };
-        chunk.update_memory_metrics();
+        chunk.update_metrics();
         chunk
     }

@@ -347,7 +356,7 @@ impl CatalogChunk {
             time_of_last_write: None,
             time_closed: None,
         };
-        chunk.update_memory_metrics();
+        chunk.update_metrics();
         chunk
     }

@@ -398,42 +407,49 @@ impl CatalogChunk {
         self.time_closed
     }

-    /// Updates `self.memory_metrics` to match the contents of `self.stage`
-    fn update_memory_metrics(&mut self) {
+    /// Updates `self.metrics` to match the contents of `self.stage`
+    fn update_metrics(&mut self) {
         match &self.stage {
             ChunkStage::Open { mb_chunk } => {
-                self.metrics
-                    .memory_metrics
-                    .mutable_buffer
-                    .set(mb_chunk.size());
-                self.metrics.memory_metrics.read_buffer.set(0);
-                self.metrics.memory_metrics.parquet.set(0);
+                self.metrics.memory_metrics.set_mub_only(mb_chunk.size());
+                self.metrics.row_count.set_mub_only(mb_chunk.rows());
+                self.metrics.chunk_storage.set_mub_only(1);
             }
             ChunkStage::Frozen { representation, .. } => match representation {
                 ChunkStageFrozenRepr::MutableBufferSnapshot(snapshot) => {
-                    self.metrics
-                        .memory_metrics
-                        .mutable_buffer
-                        .set(snapshot.size());
-                    self.metrics.memory_metrics.read_buffer.set(0);
-                    self.metrics.memory_metrics.parquet.set(0);
+                    self.metrics.memory_metrics.set_mub_only(snapshot.size());
+                    self.metrics.row_count.set_mub_only(snapshot.rows());
+                    self.metrics.chunk_storage.set_mub_only(1);
                 }
                 ChunkStageFrozenRepr::ReadBuffer(rb_chunk) => {
-                    self.metrics.memory_metrics.mutable_buffer.set(0);
-                    self.metrics.memory_metrics.read_buffer.set(rb_chunk.size());
-                    self.metrics.memory_metrics.parquet.set(0);
+                    self.metrics.memory_metrics.set_rub_only(rb_chunk.size());
+                    self.metrics
+                        .row_count
+                        .set_rub_only(rb_chunk.rows() as usize);
+                    self.metrics.chunk_storage.set_rub_only(1);
                 }
             },
             ChunkStage::Persisted {
                 parquet,
-                read_buffer,
+                read_buffer: Some(read_buffer),
                 ..
             } => {
-                let rub_size = read_buffer.as_ref().map(|x| x.size()).unwrap_or(0);
-
-                self.metrics.memory_metrics.mutable_buffer.set(0);
-                self.metrics.memory_metrics.read_buffer.set(rub_size);
-                self.metrics.memory_metrics.parquet.set(parquet.size());
+                self.metrics
+                    .memory_metrics
+                    .set_rub_and_object_store_only(read_buffer.size(), parquet.size());
+                self.metrics
+                    .row_count
+                    .set_rub_and_object_store_only(read_buffer.rows() as usize, parquet.rows());
+                self.metrics
+                    .chunk_storage
+                    .set_rub_and_object_store_only(1, 1);
             }
+            ChunkStage::Persisted { parquet, .. } => {
+                self.metrics
+                    .memory_metrics
+                    .set_object_store_only(parquet.size());
+                self.metrics.row_count.set_object_store_only(parquet.rows());
+                self.metrics.chunk_storage.set_object_store_only(1);
+            }
         }
     }
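All of the arms above funnel through the new `set_*_only` helpers, which write every location on each transition so a chunk is never counted in two storage tiers at once. A minimal runnable model of that discipline, with plain integers standing in for `metrics::GaugeValue` (a simplification for illustration, not the real type):

    // Simplified model of the StorageGauge update discipline.
    #[derive(Debug, Default)]
    struct StorageGauge {
        mutable_buffer: usize,
        read_buffer: usize,
        object_store: usize,
    }

    impl StorageGauge {
        // Each helper assigns ALL three locations, so moving a chunk between
        // storage tiers implicitly zeroes the tier it left.
        fn set_mub_only(&mut self, v: usize) {
            *self = Self { mutable_buffer: v, ..Self::default() };
        }
        fn set_rub_only(&mut self, v: usize) {
            *self = Self { read_buffer: v, ..Self::default() };
        }
        fn set_rub_and_object_store_only(&mut self, rub: usize, os: usize) {
            *self = Self { read_buffer: rub, object_store: os, ..Self::default() };
        }
        fn set_object_store_only(&mut self, v: usize) {
            *self = Self { object_store: v, ..Self::default() };
        }
        fn total(&self) -> usize {
            self.mutable_buffer + self.read_buffer + self.object_store
        }
    }

    fn main() {
        let mut rows = StorageGauge::default();
        rows.set_mub_only(2);                     // open: rows live in the mutable buffer
        rows.set_rub_only(2);                     // moved: read buffer only
        rows.set_rub_and_object_store_only(2, 2); // persisted: rub and os
        rows.set_object_store_only(2);            // unloaded: os only
        assert_eq!(rows.mutable_buffer, 0);       // the abandoned tier was zeroed
        assert_eq!(rows.total(), 2);
    }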
@@ -446,7 +462,7 @@ impl CatalogChunk {
             self.time_of_first_write = Some(now);
         }
         self.time_of_last_write = Some(now);
-        self.update_memory_metrics();
+        self.update_metrics();
     }

     /// Returns the storage and the number of rows
@@ -622,7 +638,7 @@ impl CatalogChunk {
             representation: ChunkStageFrozenRepr::MutableBufferSnapshot(Arc::clone(&s)),
             meta: Arc::new(metadata),
         };
-        self.update_memory_metrics();
+        self.update_metrics();

         Ok(())
     }
@@ -717,7 +733,7 @@ impl CatalogChunk {
                     &[KeyValue::new("state", "moved")],
                 );
                 *representation = ChunkStageFrozenRepr::ReadBuffer(chunk);
-                self.update_memory_metrics();
+                self.update_metrics();
                 self.finish_lifecycle_action(ChunkLifecycleAction::Moving)?;
                 Ok(())
             }
@@ -796,7 +812,7 @@ impl CatalogChunk {
                     parquet: chunk,
                     read_buffer: Some(db),
                 };
-                self.update_memory_metrics();
+                self.update_metrics();
                 Ok(())
             }
         }
@@ -829,7 +845,7 @@ impl CatalogChunk {
                     &[KeyValue::new("state", "os")],
                 );

-                self.update_memory_metrics();
+                self.update_metrics();

                 Ok(rub_chunk)
             } else {
|
|||
// set memory metrics to 0 to stop accounting for this chunk within the catalog
|
||||
self.metrics.memory_metrics.mutable_buffer.set(0);
|
||||
self.metrics.memory_metrics.read_buffer.set(0);
|
||||
self.metrics.memory_metrics.parquet.set(0);
|
||||
self.metrics.memory_metrics.object_store.set(0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@@ -915,7 +931,7 @@ impl CatalogChunk {
             // actions correctly. When clearing out that action, we need to restore the pre-action state. The easiest
             // (and stateless) way to do that is just to call the update method. Since clearing lifecycle actions
             // should be a rather rare event, the cost of this is negligible.
-            self.update_memory_metrics();
+            self.update_metrics();
         }
         Ok(())
     }
@@ -1,5 +1,5 @@
 use crate::db::catalog::chunk::ChunkMetrics;
-use metrics::{Counter, GaugeValue, Histogram, KeyValue};
+use metrics::{Counter, Gauge, GaugeValue, Histogram, KeyValue};
 use std::sync::Arc;
 use tracker::{LockTracker, RwLock};

@@ -9,19 +9,25 @@ pub struct CatalogMetrics {
     metrics_domain: Arc<metrics::Domain>,

     /// Catalog memory metrics
-    memory_metrics: MemoryMetrics,
+    memory_metrics: StorageGauge,
 }

 impl CatalogMetrics {
     pub fn new(metrics_domain: metrics::Domain) -> Self {
+        let chunks_mem_usage = metrics_domain.register_gauge_metric(
+            "chunks_mem_usage",
+            Some("bytes"),
+            "Memory usage by catalog chunks",
+        );
+
         Self {
-            memory_metrics: MemoryMetrics::new(&metrics_domain),
+            memory_metrics: StorageGauge::new(&chunks_mem_usage),
             metrics_domain: Arc::new(metrics_domain),
         }
     }

     /// Returns the memory metrics for the catalog
-    pub fn memory(&self) -> &MemoryMetrics {
+    pub fn memory(&self) -> &StorageGauge {
         &self.memory_metrics
     }
@@ -56,8 +62,24 @@ impl CatalogMetrics {
             &chunk_lock_tracker,
         );

+        let storage_gauge = self.metrics_domain.register_gauge_metric_with_labels(
+            "loaded",
+            Some("chunks"),
+            "The number of chunks loaded in each chunk storage location",
+            &[KeyValue::new("table", table_name.to_string())],
+        );
+
+        let row_gauge = self.metrics_domain.register_gauge_metric_with_labels(
+            "loaded",
+            Some("rows"),
+            "The number of rows loaded in each chunk storage location",
+            &[KeyValue::new("table", table_name.to_string())],
+        );
+
         TableMetrics {
             metrics_domain: Arc::clone(&self.metrics_domain),
+            chunk_storage: StorageGauge::new(&storage_gauge),
+            row_count: StorageGauge::new(&row_gauge),
             memory_metrics: self.memory_metrics.clone_empty(),
             table_lock_tracker,
             partition_lock_tracker,
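These registrations follow the domain_name_unit naming convention visible elsewhere in the diff (`chunks_mem_usage` + `bytes` → `catalog_chunks_mem_usage_bytes`), so `loaded` + `chunks`/`rows` yields the `catalog_loaded_chunks` and `catalog_loaded_rows` families asserted in the tests. Assuming a Prometheus-style text rendering, a sample would carry the per-table label from here plus the `location` label attached by `StorageGauge::new`:

    catalog_loaded_chunks{db_name="placeholder",location="read_buffer",svr_id="1",table="cpu"} 1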
@@ -71,8 +93,14 @@ pub struct TableMetrics {
     /// Metrics domain
     metrics_domain: Arc<metrics::Domain>,

+    /// Chunk storage metrics
+    chunk_storage: StorageGauge,
+
+    /// Chunk row count metrics
+    row_count: StorageGauge,
+
     /// Catalog memory metrics
-    memory_metrics: MemoryMetrics,
+    memory_metrics: StorageGauge,

     /// Lock tracker for table-level locks
     table_lock_tracker: LockTracker,
@@ -96,6 +124,8 @@ impl TableMetrics {
     pub(super) fn new_partition_metrics(&self) -> PartitionMetrics {
         // Lock tracker for chunk-level locks
         PartitionMetrics {
+            chunk_storage: self.chunk_storage.clone_empty(),
+            row_count: self.row_count.clone_empty(),
             memory_metrics: self.memory_metrics.clone_empty(),
             chunk_state: self.metrics_domain.register_counter_metric_with_labels(
                 "chunks",
@@ -119,8 +149,14 @@ impl TableMetrics {

 #[derive(Debug)]
 pub struct PartitionMetrics {
+    /// Chunk storage metrics
+    chunk_storage: StorageGauge,
+
+    /// Chunk row count metrics
+    row_count: StorageGauge,
+
     /// Catalog memory metrics
-    memory_metrics: MemoryMetrics,
+    memory_metrics: StorageGauge,

     chunk_state: Counter,

@@ -139,65 +175,92 @@ impl PartitionMetrics {
         ChunkMetrics {
             state: self.chunk_state.clone(),
             immutable_chunk_size: self.immutable_chunk_size.clone(),
+            chunk_storage: self.chunk_storage.clone_empty(),
+            row_count: self.row_count.clone_empty(),
             memory_metrics: self.memory_metrics.clone_empty(),
         }
     }
 }

+/// Created from a `metrics::Gauge` and extracts a `GaugeValue` for each chunk storage
+///
+/// This can then be used within each `CatalogChunk` to record its observations for
+/// the different storages
 #[derive(Debug)]
-pub struct MemoryMetrics {
+pub struct StorageGauge {
     pub(super) mutable_buffer: GaugeValue,
     pub(super) read_buffer: GaugeValue,
-    pub(super) parquet: GaugeValue,
+    pub(super) object_store: GaugeValue,
 }

-impl MemoryMetrics {
-    pub fn new_unregistered() -> Self {
+impl StorageGauge {
+    pub(super) fn new_unregistered() -> Self {
         Self {
             mutable_buffer: GaugeValue::new_unregistered(),
             read_buffer: GaugeValue::new_unregistered(),
-            parquet: GaugeValue::new_unregistered(),
+            object_store: GaugeValue::new_unregistered(),
         }
     }

-    pub fn new(metrics_domain: &metrics::Domain) -> Self {
-        let gauge = metrics_domain.register_gauge_metric(
-            "chunks_mem_usage",
-            Some("bytes"),
-            "Memory usage by catalog chunks",
-        );
-
+    pub(super) fn new(gauge: &Gauge) -> Self {
         Self {
-            mutable_buffer: gauge.gauge_value(&[KeyValue::new("source", "mutable_buffer")]),
-            read_buffer: gauge.gauge_value(&[KeyValue::new("source", "read_buffer")]),
-            parquet: gauge.gauge_value(&[KeyValue::new("source", "parquet")]),
+            mutable_buffer: gauge.gauge_value(&[KeyValue::new("location", "mutable_buffer")]),
+            read_buffer: gauge.gauge_value(&[KeyValue::new("location", "read_buffer")]),
+            object_store: gauge.gauge_value(&[KeyValue::new("location", "object_store")]),
         }
     }

+    pub(super) fn set_mub_only(&mut self, value: usize) {
+        self.mutable_buffer.set(value);
+        self.read_buffer.set(0);
+        self.object_store.set(0);
+    }
+
+    pub(super) fn set_rub_only(&mut self, value: usize) {
+        self.mutable_buffer.set(0);
+        self.read_buffer.set(value);
+        self.object_store.set(0);
+    }
+
+    pub(super) fn set_rub_and_object_store_only(&mut self, rub: usize, parquet: usize) {
+        self.mutable_buffer.set(0);
+        self.read_buffer.set(rub);
+        self.object_store.set(parquet);
+    }
+
+    pub(super) fn set_object_store_only(&mut self, value: usize) {
+        self.mutable_buffer.set(0);
+        self.read_buffer.set(0);
+        self.object_store.set(value);
+    }
+
     fn clone_empty(&self) -> Self {
         Self {
             mutable_buffer: self.mutable_buffer.clone_empty(),
             read_buffer: self.read_buffer.clone_empty(),
-            parquet: self.parquet.clone_empty(),
+            object_store: self.object_store.clone_empty(),
         }
     }
-    /// Returns the size of the mutable buffer
+
+    /// Returns the total for the mutable buffer
     pub fn mutable_buffer(&self) -> usize {
         self.mutable_buffer.get_total()
     }

-    /// Returns the size of the mutable buffer
+    /// Returns the total for the read buffer
     pub fn read_buffer(&self) -> usize {
         self.read_buffer.get_total()
     }

-    /// Returns the amount of data in parquet
-    pub fn parquet(&self) -> usize {
-        self.parquet.get_total()
+    /// Returns the total for object storage
+    pub fn object_store(&self) -> usize {
        self.object_store.get_total()
     }

-    /// Total bytes over all registries.
+    /// Returns the total over all storages
     pub fn total(&self) -> usize {
-        self.mutable_buffer.get_total() + self.read_buffer.get_total() + self.parquet.get_total()
+        self.mutable_buffer.get_total()
+            + self.read_buffer.get_total()
+            + self.object_store.get_total()
     }
 }
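A design note on the plumbing: every level of the hierarchy (catalog, table, partition, chunk) receives its gauges via `clone_empty`, and the accessors report `get_total()` across all clones; that is how `db.catalog.metrics().memory()` in the tests observes the sum of per-chunk contributions. A rough self-contained sketch of such an aggregating gauge (an assumed model of `GaugeValue`, whose real implementation lives in the `metrics` crate):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    #[derive(Debug)]
    struct GaugeValue {
        local: usize,
        shared: Arc<AtomicUsize>, // running total across every clone
    }

    impl GaugeValue {
        fn new_unregistered() -> Self {
            Self { local: 0, shared: Arc::new(AtomicUsize::new(0)) }
        }

        // A fresh observer contributing to the same shared total.
        fn clone_empty(&self) -> Self {
            Self { local: 0, shared: Arc::clone(&self.shared) }
        }

        // Replace this observer's contribution within the shared total.
        fn set(&mut self, value: usize) {
            self.shared.fetch_add(value, Ordering::Relaxed);
            self.shared.fetch_sub(self.local, Ordering::Relaxed);
            self.local = value;
        }

        fn get_total(&self) -> usize {
            self.shared.load(Ordering::Relaxed)
        }
    }

    fn main() {
        let catalog = GaugeValue::new_unregistered(); // catalog-level gauge
        let mut chunk_a = catalog.clone_empty();      // per-chunk observers
        let mut chunk_b = catalog.clone_empty();
        chunk_a.set(44);
        chunk_b.set(60);
        assert_eq!(catalog.get_total(), 104); // parent sees the sum: 44 + 60
    }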