refactor: address Andrew's comments
parent 2f82a9d670
commit ff641e5638
@@ -98,15 +98,6 @@ impl TableSummary {
        }
    }

    /// Returns the primary key of this table
    pub fn primary_key(&self) -> Vec<String> {
        self.columns
            .iter()
            .filter(|c| c.key_part())
            .map(|c| c.name.clone())
            .collect()
    }

    /// Returns the total number of rows in the columns of this summary
    pub fn count(&self) -> u64 {
        // Assumes that all tables have the same number of rows, so
@@ -195,15 +186,6 @@ impl ColumnSummary {
        self.stats.type_name()
    }

    /// Return true if this column is a part of the primary key which
    /// means it is either a tag or timestamp
    pub fn key_part(&self) -> bool {
        matches!(
            self.influxdb_type,
            Some(InfluxDbType::Tag) | Some(InfluxDbType::Timestamp)
        )
    }

    /// Return size in bytes of this Column metadata (not the underlying column)
    pub fn size(&self) -> usize {
        mem::size_of::<Self>() + self.name.len() + self.stats.size()
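For context on the two hunks above: they remove `TableSummary::primary_key` and `ColumnSummary::key_part`, which together derived a table's primary key from its column metadata. A minimal standalone sketch of what those helpers computed, using stand-in types and made-up column names rather than the IOx definitions:

// Standalone sketch, not the IOx code: mirrors the removed primary_key/key_part logic.
#[derive(Clone, Copy, Debug)]
enum InfluxDbType {
    Tag,
    Field,
    Timestamp,
}

struct ColumnSummary {
    name: String,
    influxdb_type: Option<InfluxDbType>,
}

impl ColumnSummary {
    // A column is part of the key if it is a tag or the timestamp.
    fn key_part(&self) -> bool {
        matches!(
            self.influxdb_type,
            Some(InfluxDbType::Tag) | Some(InfluxDbType::Timestamp)
        )
    }
}

// Collects the names of all key columns, as the removed method did.
fn primary_key(columns: &[ColumnSummary]) -> Vec<String> {
    columns
        .iter()
        .filter(|c| c.key_part())
        .map(|c| c.name.clone())
        .collect()
}

fn main() {
    let cols = vec![
        ColumnSummary { name: "host".into(), influxdb_type: Some(InfluxDbType::Tag) },
        ColumnSummary { name: "usage".into(), influxdb_type: Some(InfluxDbType::Field) },
        ColumnSummary { name: "time".into(), influxdb_type: Some(InfluxDbType::Timestamp) },
    ];
    // Prints ["host", "time"]: tags and the timestamp form the key, plain fields do not.
    println!("{:?}", primary_key(&cols));
}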
@@ -58,11 +58,6 @@ pub enum Error {
    InternalPushdownPredicate {
        source: datafusion::error::DataFusionError,
    },

    #[snafu(display("Internal error while looking for overlapped chunks '{}'", source,))]
    InternalSplitOvelappedChunks {
        source: datafusion::error::DataFusionError,
    },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
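For context on this hunk: these `Error` variants follow the snafu pattern of wrapping an underlying `DataFusionError` as a `source` field. A minimal standalone sketch of that pattern, assuming the `snafu` crate as a dependency and using an illustrative variant with `std::io::Error` instead of the IOx types:

// Standalone sketch of the snafu "source" pattern, not the IOx code.
use snafu::Snafu;

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Internal error while reading config '{}'", source))]
    InternalReadConfig { source: std::io::Error },
}

type Result<T, E = Error> = std::result::Result<T, E>;

fn read_config(path: &str) -> Result<String> {
    // Wrap the underlying error into the variant; in IOx this is usually done
    // via snafu's generated context selectors rather than map_err.
    std::fs::read_to_string(path).map_err(|source| Error::InternalReadConfig { source })
}

fn main() {
    if let Err(e) = read_config("does-not-exist.toml") {
        println!("{}", e);
    }
}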
@@ -349,8 +344,6 @@ impl<C: PartitionChunk + 'static> Deduplicater<C> {
        chunks: Vec<Arc<C>>,
        predicate: Predicate,
    ) -> Arc<dyn ExecutionPlan> {
        //predicate: Predicate,) -> std::result::Result<Arc<dyn ExecutionPlan>, DataFusionError> {

        //finding overlapped chunks and put them into the right group
        self.split_overlapped_chunks(chunks.to_vec());
@@ -403,7 +396,7 @@ impl<C: PartitionChunk + 'static> Deduplicater<C> {
        // There are still plan, add UnionExec
        if !plans.is_empty() {
            // final_plan = union_plan
            // ....
            panic!("Unexpected error: There should be only one output for scan plan");
        }

        final_plan
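For context on this hunk: the commented-out `union_plan` lines sketch combining several per-chunk plans under a union node, while the current code panics if more than one plan remains. A standalone illustration of that shape, with `Plan` and `finalize` as hypothetical stand-ins rather than DataFusion's `ExecutionPlan`:

// Illustrative only: "single plan vs. union of plans", not the IOx/DataFusion API.
#[derive(Debug)]
enum Plan {
    Scan(String),
    Union(Vec<Plan>),
}

fn finalize(mut plans: Vec<Plan>) -> Plan {
    if plans.len() == 1 {
        plans.remove(0)
    } else {
        // Several per-chunk plans remain: combine them under a union node
        // rather than panicking.
        Plan::Union(plans)
    }
}

fn main() {
    let plans = vec![Plan::Scan("chunk 1".into()), Plan::Scan("chunk 2".into())];
    println!("{:?}", finalize(plans));
}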
@@ -49,12 +49,6 @@ pub struct ChunkMetadata {
    pub schema: Arc<Schema>,
}

impl ChunkMetadata {
    pub fn primary_key(&self) -> Vec<String> {
        self.table_summary.primary_key()
    }
}

/// Different memory representations of a frozen chunk.
#[derive(Debug)]
pub enum ChunkStageFrozenRepr {
@@ -165,10 +165,6 @@ impl DbChunk {
        })
    }

    pub fn primary_key(&self) -> Vec<String> {
        self.meta.primary_key()
    }

    /// Return the snapshot of the chunk with type ParquetFile
    /// This function should be only invoked when you know your chunk
    /// is ParquetFile type whose state is WrittenToObjectStore. The