fix: Avoid an unnecessary parsing of iox metadata
In one case where ParquetChunk::new was being called, the calling code had just parsed the IoxMetadata itself. In the other case, the calling code had just *created* the IoxMetadata being parsed. In both cases, this re-parsing wasn't actually needed; the two bits of info ParquetChunk::new needs can easily be passed in directly.
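For illustration, here is a minimal, self-contained sketch of the calling pattern this commit moves to. The types below are simplified stand-ins for the real IOx structs (the actual ParquetChunk::new takes more parameters and returns a Result); the point is only that callers, which already hold the table name and partition key, pass them in directly instead of having the constructor re-parse them out of the parquet metadata.

    use std::sync::Arc;

    // Simplified stand-ins for the real IOx types (hypothetical; the actual
    // structs carry many more fields and methods).
    struct IoxParquetMetaData;
    struct ChunkMetrics;

    struct ParquetChunk {
        table_name: Arc<str>,
        partition_key: Arc<str>,
    }

    impl ParquetChunk {
        // After this change the constructor receives the table name and
        // partition key directly instead of re-parsing the IoxMetadata.
        fn new(
            _parquet_metadata: Arc<IoxParquetMetaData>,
            table_name: Arc<str>,
            partition_key: Arc<str>,
            _metrics: ChunkMetrics,
        ) -> Self {
            Self { table_name, partition_key }
        }
    }

    fn main() {
        // Callers already hold these values (from the chunk address, or from
        // the IoxMetadata they just constructed), so no re-parse is needed.
        let table_name: Arc<str> = Arc::from("cpu");
        let partition_key: Arc<str> = Arc::from("2021-01-01");
        let chunk = ParquetChunk::new(
            Arc::new(IoxParquetMetaData),
            Arc::clone(&table_name),
            Arc::clone(&partition_key),
            ChunkMetrics,
        );
        assert_eq!(&*chunk.table_name, "cpu");
        assert_eq!(&*chunk.partition_key, "2021-01-01");
    }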
parent a1e2ce77f0
commit ad0a9549de
@@ -1,7 +1,4 @@
-use crate::{
-    metadata::{IoxMetadata, IoxParquetMetaData},
-    storage::Storage,
-};
+use crate::{metadata::IoxParquetMetaData, storage::Storage};
 use data_types::{
     partition_metadata::{Statistics, TableSummary},
     timestamp::TimestampRange,
@@ -118,20 +115,10 @@ impl ParquetChunk {
         store: Arc<ObjectStore>,
         file_size_bytes: usize,
         parquet_metadata: Arc<IoxParquetMetaData>,
+        table_name: Arc<str>,
+        partition_key: Arc<str>,
         metrics: ChunkMetrics,
     ) -> Result<Self> {
-        let iox_md = parquet_metadata
-            .read_iox_metadata()
-            .context(IoxMetadataReadFailed {
-                path: &file_location,
-            })?;
-
-        let IoxMetadata {
-            table_name,
-            partition_key,
-            ..
-        } = iox_md;
-
         let schema = parquet_metadata.read_schema().context(SchemaReadFailed {
             path: &file_location,
         })?;
@@ -53,6 +53,8 @@ pub(super) fn write_chunk_to_object_store(
 )> {
     let db = Arc::clone(&chunk.data().db);
     let addr = chunk.addr().clone();
+    let table_name = Arc::clone(&addr.table_name);
+    let partition_key = Arc::clone(&addr.partition_key);

     let (tracker, registration) = db.jobs.register(Job::WriteChunk {
         chunk: addr.clone(),
@@ -116,8 +118,8 @@ pub(super) fn write_chunk_to_object_store(
     // between creation and the transaction commit.
     let metadata = IoxMetadata {
         creation_timestamp: Utc::now(),
-        table_name: Arc::clone(&addr.table_name),
-        partition_key: Arc::clone(&addr.partition_key),
+        table_name: Arc::clone(&table_name),
+        partition_key: Arc::clone(&partition_key),
         chunk_id: addr.chunk_id,
         partition_checkpoint,
         database_checkpoint,
@@ -141,6 +143,8 @@ pub(super) fn write_chunk_to_object_store(
             Arc::clone(&db.store),
             file_size_bytes,
             Arc::clone(&parquet_metadata),
+            Arc::clone(&table_name),
+            Arc::clone(&partition_key),
             metrics,
         )
         .context(ParquetChunkError)?,
@@ -210,6 +210,8 @@ impl CatalogState for Loader {
             object_store,
             info.file_size_bytes,
             info.metadata,
+            Arc::clone(&iox_md.table_name),
+            Arc::clone(&iox_md.partition_key),
             metrics,
         )
         .context(ChunkCreationFailed {