refactor: Make `Db::mutable_buffer_chunks` and `Db::read_buffer_chunks` private (#1044)

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
Branch: pull/24376/head
Author: Andrew Lamb, 2021-03-24 16:57:15 -04:00 (committed by GitHub)
Parent: 5f3fb35da1
Commit: 4cbf7c6ae5
3 changed files with 40 additions and 15 deletions
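
The change removes `pub` from `Db::mutable_buffer_chunks` and `Db::read_buffer_chunks`, so the query_tests now count chunks through the public chunk-summary API (`partition_keys()` / `partition_chunk_summaries()`, presumably why `use query::Database` is added to utils below) instead of reaching into the `Db` accessors. A minimal, self-contained sketch of that counting pattern, using stand-in `ChunkSummary`/`ChunkStorage` types for illustration rather than the real `data_types::chunk` definitions:

// Stand-in types: the real ones come from data_types::chunk in the diff below.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ChunkStorage {
    OpenMutableBuffer,
    ClosedMutableBuffer,
    ReadBuffer,
}

struct ChunkSummary {
    storage: ChunkStorage,
}

// Count chunks by where they are stored, using only summary information.
fn count_mutable_buffer_chunks(summaries: &[ChunkSummary]) -> usize {
    summaries
        .iter()
        .filter(|s| {
            s.storage == ChunkStorage::OpenMutableBuffer
                || s.storage == ChunkStorage::ClosedMutableBuffer
        })
        .count()
}

fn count_read_buffer_chunks(summaries: &[ChunkSummary]) -> usize {
    summaries
        .iter()
        .filter(|s| s.storage == ChunkStorage::ReadBuffer)
        .count()
}

fn main() {
    // Two mutable buffer chunks (one open, one closed) and one read buffer chunk.
    let summaries = vec![
        ChunkSummary { storage: ChunkStorage::OpenMutableBuffer },
        ChunkSummary { storage: ChunkStorage::ClosedMutableBuffer },
        ChunkSummary { storage: ChunkStorage::ReadBuffer },
    ];
    assert_eq!(count_mutable_buffer_chunks(&summaries), 2);
    assert_eq!(count_read_buffer_chunks(&summaries), 1);
}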

@@ -256,7 +256,7 @@ impl Db {
     ///
     /// TODO: make this function non pub and use partition_summary
     /// information in the query_tests
-    pub fn mutable_buffer_chunks(&self, partition_key: &str) -> Vec<Arc<DBChunk>> {
+    fn mutable_buffer_chunks(&self, partition_key: &str) -> Vec<Arc<DBChunk>> {
         let chunks = if let Some(mutable_buffer) = self.mutable_buffer.as_ref() {
             let open_chunk_id = mutable_buffer.open_chunk_id(partition_key);
@@ -291,10 +291,7 @@ impl Db {
     ///
     /// NOTE the results may contain partially loaded chunks. The catalog
     /// should always be used to determine where to find each chunk
-    ///
-    /// TODO: make this function non pub and use partition_summary
-    /// information in the query_tests
-    pub fn read_buffer_chunks(&self, partition_key: &str) -> Vec<Arc<DBChunk>> {
+    fn read_buffer_chunks(&self, partition_key: &str) -> Vec<Arc<DBChunk>> {
         self.read_buffer
             .chunk_ids(partition_key)
             .into_iter()

@@ -6,7 +6,7 @@ use async_trait::async_trait;
 use crate::db::Db;
-use super::utils::make_db;
+use super::utils::{count_mutable_buffer_chunks, count_read_buffer_chunks, make_db};
 
 /// Holds a database and a description of how its data was configured
 pub struct DBScenario {
@@ -36,8 +36,8 @@ impl DBSetup for NoData {
         // listing partitions (which may create an entry in a map)
         // in an empty database
         let db = make_db();
-        assert_eq!(db.mutable_buffer_chunks(partition_key).len(), 1); // only open chunk
-        assert_eq!(db.read_buffer_chunks(partition_key).len(), 0);
+        assert_eq!(count_mutable_buffer_chunks(&db), 0);
+        assert_eq!(count_read_buffer_chunks(&db), 0);
         let scenario2 = DBScenario {
             scenario_name: "New, Empty Database after partitions are listed".into(),
             db,
@@ -51,20 +51,20 @@ impl DBSetup for NoData {
 
         // move data out of open chunk
         assert_eq!(db.rollover_partition(partition_key).await.unwrap().id(), 0);
-        assert_eq!(db.mutable_buffer_chunks(partition_key).len(), 2);
-        assert_eq!(db.read_buffer_chunks(partition_key).len(), 0); // only open chunk
+        assert_eq!(count_mutable_buffer_chunks(&db), 2);
+        assert_eq!(count_read_buffer_chunks(&db), 0); // only open chunk
 
         db.load_chunk_to_read_buffer(partition_key, 0)
             .await
            .unwrap();
 
-        assert_eq!(db.mutable_buffer_chunks(partition_key).len(), 1);
-        assert_eq!(db.read_buffer_chunks(partition_key).len(), 1); // only open chunk
+        assert_eq!(count_mutable_buffer_chunks(&db), 1);
+        assert_eq!(count_read_buffer_chunks(&db), 1); // only open chunk
 
         db.drop_chunk(partition_key, 0).unwrap();
 
-        assert_eq!(db.mutable_buffer_chunks(partition_key).len(), 1);
-        assert_eq!(db.read_buffer_chunks(partition_key).len(), 0);
+        assert_eq!(count_mutable_buffer_chunks(&db), 1);
+        assert_eq!(count_read_buffer_chunks(&db), 0);
 
         let scenario3 = DBScenario {
             scenario_name: "Empty Database after drop chunk".into(),

@@ -1,5 +1,9 @@
-use data_types::database_rules::DatabaseRules;
+use data_types::{
+    chunk::{ChunkStorage, ChunkSummary},
+    database_rules::DatabaseRules,
+};
 use mutable_buffer::MutableBufferDb;
+use query::Database;
 
 use crate::{db::Db, JobRegistry};
 use std::sync::Arc;
@@ -15,3 +19,27 @@ pub fn make_db() -> Db {
         Arc::new(JobRegistry::new()),
     )
 }
+
+fn chunk_summary_iter(db: &Db) -> impl Iterator<Item = ChunkSummary> + '_ {
+    db.partition_keys()
+        .unwrap()
+        .into_iter()
+        .flat_map(move |partition_key| db.partition_chunk_summaries(&partition_key))
+}
+
+/// Returns the number of mutable buffer chunks in the specified database
+pub fn count_mutable_buffer_chunks(db: &Db) -> usize {
+    chunk_summary_iter(db)
+        .filter(|s| {
+            s.storage == ChunkStorage::OpenMutableBuffer
+                || s.storage == ChunkStorage::ClosedMutableBuffer
+        })
+        .count()
+}
+
+/// Returns the number of read buffer chunks in the specified database
+pub fn count_read_buffer_chunks(db: &Db) -> usize {
+    chunk_summary_iter(db)
+        .filter(|s| s.storage == ChunkStorage::ReadBuffer)
+        .count()
+}
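
For reference, a sketch of how another test might call the new helpers; the module and test names here are hypothetical, and the `use` path would need to match where utils sits relative to the caller, but the fixture and the expected counts mirror the NoData scenario above:

#[cfg(test)]
mod helper_usage {
    // Hypothetical path: adjust to wherever the utils module lives.
    use super::utils::{count_mutable_buffer_chunks, count_read_buffer_chunks, make_db};

    #[test]
    fn new_db_reports_no_chunks() {
        // A freshly created Db reports no chunk summaries in either storage
        // location, matching the assertions in the NoData scenario above.
        let db = make_db();
        assert_eq!(count_mutable_buffer_chunks(&db), 0);
        assert_eq!(count_read_buffer_chunks(&db), 0);
    }
}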