diff --git a/server/src/db/system_tables.rs b/server/src/db/system_tables.rs
index 0050df1c3a..c8ef305c06 100644
--- a/server/src/db/system_tables.rs
+++ b/server/src/db/system_tables.rs
@@ -14,7 +14,6 @@ use arrow::{
     error::Result,
     record_batch::RecordBatch,
 };
-use chrono::{DateTime, Utc};
 use datafusion::{
     catalog::schema::SchemaProvider,
     datasource::{datasource::Statistics, TableProvider},
@@ -153,11 +152,6 @@ where
     }
 }
 
-// TODO: Use a custom proc macro or serde to reduce the boilerplate
-fn time_to_ts(time: Option<DateTime<Utc>>) -> Option<i64> {
-    time.map(|ts| ts.timestamp_nanos())
-}
-
 /// Creates a DataFusion ExecutionPlan node that scans a single batch
 /// of records.
 fn scan_batch(
diff --git a/server/src/db/system_tables/chunks.rs b/server/src/db/system_tables/chunks.rs
index be863f24b5..f719dbc005 100644
--- a/server/src/db/system_tables/chunks.rs
+++ b/server/src/db/system_tables/chunks.rs
@@ -1,13 +1,11 @@
-use crate::db::{
-    catalog::Catalog,
-    system_tables::{time_to_ts, IoxSystemTable},
-};
+use crate::db::{catalog::Catalog, system_tables::IoxSystemTable};
 use arrow::{
     array::{StringArray, TimestampNanosecondArray, UInt32Array, UInt64Array},
     datatypes::{DataType, Field, Schema, SchemaRef, TimeUnit},
     error::Result,
     record_batch::RecordBatch,
 };
+use chrono::{DateTime, Utc};
 use data_types::{chunk_metadata::ChunkSummary, error::ErrorLogger};
 use std::sync::Arc;
 
@@ -56,6 +54,11 @@ fn chunk_summaries_schema() -> SchemaRef {
     ]))
 }
 
+// TODO: Use a custom proc macro or serde to reduce the boilerplate
+fn time_to_ts(time: Option<DateTime<Utc>>) -> Option<i64> {
+    time.map(|ts| ts.timestamp_nanos())
+}
+
 fn from_chunk_summaries(schema: SchemaRef, chunks: Vec<ChunkSummary>) -> Result<RecordBatch> {
     let id = chunks.iter().map(|c| Some(c.id)).collect::<UInt32Array>();
     let partition_key = chunks