chore: remove more dead code (#1760)

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
Andrew Lamb 2021-06-18 17:28:22 -04:00 committed by GitHub
parent 6559a9e997
commit 258a6b1956
4 changed files with 11 additions and 25 deletions


@@ -136,7 +136,7 @@ impl ParquetChunk {
         Ok(Self::new_from_parts(
             iox_md.partition_key,
             Arc::new(table_summary),
-            Arc::new(schema),
+            schema,
             file_location,
             store,
             parquet_metadata,
@@ -202,22 +202,8 @@ impl ParquetChunk {
             + mem::size_of_val(&self.parquet_metadata)
     }
 
-    /// Return possibly restricted Schema for this chunk
-    pub fn schema(&self, selection: Selection<'_>) -> Result<Schema> {
-        Ok(match selection {
-            Selection::All => self.schema.as_ref().clone(),
-            Selection::Some(columns) => {
-                let columns = self
-                    .schema
-                    .select_indicies(columns)
-                    .context(SelectColumns)?;
-                self.schema.project_indices(&columns)
-            }
-        })
-    }
-
     /// Infallably return the full schema (for all columns) for this chunk
-    pub fn full_schema(&self) -> Arc<Schema> {
+    pub fn schema(&self) -> Arc<Schema> {
         Arc::clone(&self.schema)
     }
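The selection-aware accessor was dead code, so only the plain getter survives (renamed from `full_schema` to `schema`). A caller that still needed a restricted schema could reproduce the removed `Selection::Some` branch at the call site with the same helpers the deleted body used. A minimal sketch, assuming a `chunk: ParquetChunk` in scope and hypothetical column names; `select_indicies` (spelling as in the removed code) and `project_indices` are the helpers visible in the deleted body:

    // Sketch only: re-create the deleted Selection::Some path at the call site.
    let full = chunk.schema(); // Arc<Schema>; formerly full_schema()
    let indices = full
        .select_indicies(&["time", "host"]) // hypothetical column names
        .expect("unknown column");
    let projected = full.project_indices(&indices); // restricted Schema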


@@ -265,7 +265,7 @@ impl IoxParquetMetaData {
     }
 
     /// Read IOx schema from parquet metadata.
-    pub fn read_schema(&self) -> Result<Schema> {
+    pub fn read_schema(&self) -> Result<Arc<Schema>> {
         let file_metadata = self.md.file_metadata();
 
         let arrow_schema = parquet_to_arrow_schema(
@@ -279,7 +279,7 @@ impl IoxParquetMetaData {
         let schema: Schema = arrow_schema_ref
             .try_into()
             .context(IoxFromArrowFailure {})?;
-        Ok(schema)
+        Ok(Arc::new(schema))
     }
 
     /// Read IOx statistics (including timestamp range) from parquet metadata.
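Returning `Arc<Schema>` lets callers share the parsed schema instead of deep-cloning it; `Arc::clone` only bumps a reference count. A minimal sketch of the new call-site shape, assuming a `metadata: IoxParquetMetaData` binding:

    let schema: Arc<Schema> = metadata.read_schema()?;
    let shared = Arc::clone(&schema); // cheap: increments the refcount, no copy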
@@ -563,7 +563,7 @@ fn extract_iox_statistics(
 mod tests {
     use super::*;
 
-    use internal_types::{schema::TIME_COLUMN_NAME, selection::Selection};
+    use internal_types::schema::TIME_COLUMN_NAME;
 
     use crate::test_utils::{
         chunk_addr, load_parquet_from_store, make_chunk, make_chunk_no_row_group, make_object_store,
@@ -579,7 +579,7 @@ mod tests {
 
         // step 1: read back schema
         let schema_actual = parquet_metadata.read_schema().unwrap();
-        let schema_expected = chunk.schema(Selection::All).unwrap();
+        let schema_expected = chunk.schema();
         assert_eq!(schema_actual, schema_expected);
 
         // step 2: read back statistics
@@ -602,7 +602,7 @@ mod tests {
 
         // step 1: read back schema
         let schema_actual = parquet_metadata.read_schema().unwrap();
-        let schema_expected = chunk.schema(Selection::All).unwrap();
+        let schema_expected = chunk.schema();
         assert_eq!(schema_actual, schema_expected);
 
         // step 2: read back statistics
@@ -623,7 +623,7 @@ mod tests {
 
         // step 1: read back schema
         let schema_actual = parquet_metadata.read_schema().unwrap();
-        let schema_expected = chunk.schema(Selection::All).unwrap();
+        let schema_expected = chunk.schema();
         assert_eq!(schema_actual, schema_expected);
 
         // step 2: reading back statistics fails
@@ -646,7 +646,7 @@ mod tests {
 
         // step 1: read back schema
         let schema_actual = parquet_metadata.read_schema().unwrap();
-        let schema_expected = chunk.schema(Selection::All).unwrap();
+        let schema_expected = chunk.schema();
         assert_eq!(schema_actual, schema_expected);
 
         // step 2: reading back statistics fails


@@ -55,7 +55,7 @@ mod tests {
         //
         // 1. Check metadata at file level: Everything is correct
         let schema_actual = parquet_metadata.read_schema().unwrap();
-        assert_eq!(schema.clone(), schema_actual);
+        assert_eq!(Arc::new(schema.clone()), schema_actual);
         assert_eq!(
             key_value_metadata.clone(),
             schema_actual.as_arrow().metadata().clone()
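Wrapping the expected value in a fresh `Arc` is enough here because `PartialEq` for `Arc<T>` compares the pointed-to values, not the allocations. A standalone illustration:

    use std::sync::Arc;

    let a = Arc::new(String::from("schema"));
    let b = Arc::new(String::from("schema"));
    assert_eq!(a, b);              // contents compare equal...
    assert!(!Arc::ptr_eq(&a, &b)); // ...even though the allocations differ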


@@ -280,7 +280,7 @@ impl CatalogChunk {
         // Cache table summary + schema
         let meta = Arc::new(ChunkMetadata {
             table_summary: Arc::clone(chunk.table_summary()),
-            schema: chunk.full_schema(),
+            schema: chunk.schema(),
         });
 
         let stage = ChunkStage::Persisted {