fix: Avoid an unnecessary parsing of iox metadata

In one case where ParquetChunk::new was being called, the calling code
had just parsed the IoxMetadata too. In the other case, the calling code
had just *created* the IoxMetadata being parsed. In both cases, this
re-parsing wasn't actually needed; the two bits of info can easily be
passed in to ParquetChunk::new instead.
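
Concretely, both call sites already hold the table name and partition key
as Arc<str>, so they can hand them over as plain arguments. A rough sketch
of the new call shape, with names mirroring the diffs below and any leading
arguments (such as the file location) elided, so take it as illustrative
rather than the exact signature:

    let chunk = ParquetChunk::new(
        // ...leading arguments elided...
        Arc::clone(&store),            // Arc<ObjectStore>
        file_size_bytes,               // usize
        Arc::clone(&parquet_metadata), // Arc<IoxParquetMetaData>
        Arc::clone(&table_name),       // Arc<str>, previously re-parsed inside new()
        Arc::clone(&partition_key),    // Arc<str>, previously re-parsed inside new()
        metrics,                       // ChunkMetrics
    )?;

Since these are Arc clones, each extra argument is just a reference-count
bump, not a copy of the underlying string.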
branch pull/24376/head
author Carol (Nichols || Goulding) 2021-07-28 14:25:54 -04:00
parent a1e2ce77f0
commit ad0a9549de
3 changed files with 11 additions and 18 deletions


@@ -1,7 +1,4 @@
-use crate::{
-    metadata::{IoxMetadata, IoxParquetMetaData},
-    storage::Storage,
-};
+use crate::{metadata::IoxParquetMetaData, storage::Storage};
 use data_types::{
     partition_metadata::{Statistics, TableSummary},
     timestamp::TimestampRange,
@@ -118,20 +115,10 @@ impl ParquetChunk {
         store: Arc<ObjectStore>,
         file_size_bytes: usize,
         parquet_metadata: Arc<IoxParquetMetaData>,
+        table_name: Arc<str>,
+        partition_key: Arc<str>,
         metrics: ChunkMetrics,
     ) -> Result<Self> {
-        let iox_md = parquet_metadata
-            .read_iox_metadata()
-            .context(IoxMetadataReadFailed {
-                path: &file_location,
-            })?;
-        let IoxMetadata {
-            table_name,
-            partition_key,
-            ..
-        } = iox_md;
         let schema = parquet_metadata.read_schema().context(SchemaReadFailed {
             path: &file_location,
         })?;


@@ -53,6 +53,8 @@ pub(super) fn write_chunk_to_object_store(
 )> {
     let db = Arc::clone(&chunk.data().db);
     let addr = chunk.addr().clone();
+    let table_name = Arc::clone(&addr.table_name);
+    let partition_key = Arc::clone(&addr.partition_key);
 
     let (tracker, registration) = db.jobs.register(Job::WriteChunk {
         chunk: addr.clone(),
@@ -116,8 +118,8 @@ pub(super) fn write_chunk_to_object_store(
         // between creation and the transaction commit.
         let metadata = IoxMetadata {
             creation_timestamp: Utc::now(),
-            table_name: Arc::clone(&addr.table_name),
-            partition_key: Arc::clone(&addr.partition_key),
+            table_name: Arc::clone(&table_name),
+            partition_key: Arc::clone(&partition_key),
             chunk_id: addr.chunk_id,
             partition_checkpoint,
             database_checkpoint,
@@ -141,6 +143,8 @@ pub(super) fn write_chunk_to_object_store(
                 Arc::clone(&db.store),
                 file_size_bytes,
                 Arc::clone(&parquet_metadata),
+                Arc::clone(&table_name),
+                Arc::clone(&partition_key),
                 metrics,
             )
             .context(ParquetChunkError)?,


@@ -210,6 +210,8 @@ impl CatalogState for Loader {
             object_store,
             info.file_size_bytes,
             info.metadata,
+            Arc::clone(&iox_md.table_name),
+            Arc::clone(&iox_md.partition_key),
             metrics,
         )
         .context(ChunkCreationFailed {