feat: initial version of writing chunks to parquet files

pull/24376/head
Nga Tran 2021-03-30 09:56:53 -04:00
parent 6a48001d13
commit 1a0e698571
2 changed files with 28 additions and 2 deletions

View File

@@ -380,6 +380,28 @@ impl Db {
Ok(DBChunk::snapshot(&chunk))
}
pub fn load_chunk_to_parquet(&self,
partition_key: &str,
chunk_id: u32,
) -> Result<Arc<DBChunk>> {
let chunk = {
let partition = self
.catalog
.valid_partition(partition_key)
.context(LoadingChunk {
partition_key,
chunk_id,
})?;
let partition = partition.read();
partition.chunk(chunk_id).context(LoadingChunk {
partition_key,
chunk_id,
})?
};
Ok(&chunk)
}
/// Returns the next write sequence number
pub fn next_sequence(&self) -> u64 {
self.sequence.fetch_add(1, Ordering::SeqCst)

View File

@@ -7,7 +7,7 @@ use read_buffer::Database as ReadBufferDb;
use snafu::{ResultExt, Snafu};
use tracing::debug;
use std::sync::Arc;
use std::{path::Path, sync::Arc};
use super::{
pred::{to_mutable_buffer_predicate, to_read_buffer_predicate},
@@ -68,7 +68,11 @@ pub enum DBChunk {
partition_key: Arc<String>,
chunk_id: u32,
},
ParquetFile, // TODO add appropriate type here
ParquetFile {
path: String, // name of the parquet file
partition_key: Arc<String>,
chunk_id: u32, // ID of the chunk stored in this parquet file
}
}
impl DBChunk {