Merge pull request #2794 from influxdata/crepererum/issue1846

refactor: remove `time_closed`

commit 470de36f3b
@@ -160,10 +160,6 @@ pub struct ChunkSummary {
     /// into IOx. Note due to the compaction, etc... this may not be the chunk
     /// that data was originally written into
     pub time_of_last_write: DateTime<Utc>,
-
-    /// Time at which this chunk was marked as closed. Note this is
-    /// not the same as the timestamps on the data itself
-    pub time_closed: Option<DateTime<Utc>>,
 }
 
 /// Represents metadata about the physical storage of a column in a chunk
@@ -90,9 +90,8 @@ message Chunk {
   // that data was originally written into
   google.protobuf.Timestamp time_of_last_write = 6;
 
-  // Time at which this chunk was marked as closed. Note this is not
-  // the same as the timestamps on the data itself
-  google.protobuf.Timestamp time_closed = 7;
+  // Was `time_closed`.
+  reserved 7;
 
   // Order of this chunk relative to other overlapping chunks.
   uint32 order = 13;
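Dropping field 7 from the message and marking it `reserved` keeps the wire format compatible: bytes produced by an old writer that still sets `time_closed` decode cleanly (the unknown field is skipped), while the reservation prevents tag 7 from being recycled for an unrelated field later. A rough illustration with hand-written prost structs, assuming a `prost` dependency with its derive feature; the field types are simplified to plain integers and these structs are stand-ins, not the real generated `management.Chunk` types:

use prost::Message;

/// Old shape of the message, with `time_closed` still present as field 7
/// (timestamps simplified to nanosecond integers purely for illustration).
#[derive(Clone, PartialEq, Message)]
struct OldChunk {
    #[prost(uint64, tag = "6")]
    time_of_last_write: u64,
    #[prost(uint64, optional, tag = "7")]
    time_closed: Option<u64>,
    #[prost(uint32, tag = "13")]
    order: u32,
}

/// New shape: field 7 is gone; `reserved 7;` in the .proto keeps it from being reused.
#[derive(Clone, PartialEq, Message)]
struct NewChunk {
    #[prost(uint64, tag = "6")]
    time_of_last_write: u64,
    #[prost(uint32, tag = "13")]
    order: u32,
}

fn main() {
    let old = OldChunk {
        time_of_last_write: 42,
        time_closed: Some(7),
        order: 5,
    };
    let bytes = old.encode_to_vec();

    // An old writer's bytes still decode: the unknown field 7 is simply skipped.
    let new = NewChunk::decode(bytes.as_slice()).unwrap();
    assert_eq!(new.time_of_last_write, 42);
    assert_eq!(new.order, 5);
}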
@@ -25,7 +25,6 @@ impl From<ChunkSummary> for management::Chunk {
             time_of_last_access,
             time_of_first_write,
             time_of_last_write,
-            time_closed,
             order,
         } = summary;
 

@@ -41,7 +40,6 @@ impl From<ChunkSummary> for management::Chunk {
             time_of_last_access: time_of_last_access.map(Into::into),
             time_of_first_write: Some(time_of_first_write.into()),
             time_of_last_write: Some(time_of_last_write.into()),
-            time_closed: time_closed.map(Into::into),
             order: order.get(),
         }
     }

@@ -106,7 +104,6 @@ impl TryFrom<management::Chunk> for ChunkSummary {
             time_of_last_access,
             time_of_first_write,
             time_of_last_write,
-            time_closed,
             order,
         } = proto;
 

@@ -123,7 +120,6 @@ impl TryFrom<management::Chunk> for ChunkSummary {
             time_of_last_access: timestamp(time_of_last_access, "time_of_last_access")?,
             time_of_first_write: required_timestamp(time_of_first_write, "time_of_first_write")?,
             time_of_last_write: required_timestamp(time_of_last_write, "time_of_last_write")?,
-            time_closed: timestamp(time_closed, "time_closed")?,
             order: ChunkOrder::new(order).ok_or_else(|| FieldViolation {
                 field: "order".to_string(),
                 description: "Order must be non-zero".to_string(),
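The `TryFrom` impl distinguishes optional timestamps (`timestamp`, used for `time_of_last_access` and previously `time_closed`) from required ones (`required_timestamp`), which produce a `FieldViolation` naming the missing field. A self-contained sketch of that pattern; `ProtoTimestamp` and `FieldViolation` here are simplified stand-ins, and the helper bodies are an assumption based on the call sites, not the crate's actual code:

use chrono::{DateTime, TimeZone, Utc};

/// Simplified stand-in for the generated field-violation error.
#[derive(Debug)]
struct FieldViolation {
    field: String,
    description: String,
}

/// Simplified stand-in for the protobuf Timestamp.
#[derive(Debug, Clone, Copy)]
struct ProtoTimestamp {
    seconds: i64,
    nanos: i32,
}

/// Optional field: absence is fine and maps to `None`.
fn timestamp(
    ts: Option<ProtoTimestamp>,
    _field: &str,
) -> Result<Option<DateTime<Utc>>, FieldViolation> {
    Ok(ts.map(|t| Utc.timestamp(t.seconds, t.nanos as u32)))
}

/// Required field: absence becomes a violation naming the offending field.
fn required_timestamp(
    ts: Option<ProtoTimestamp>,
    field: &str,
) -> Result<DateTime<Utc>, FieldViolation> {
    timestamp(ts, field)?.ok_or_else(|| FieldViolation {
        field: field.to_string(),
        description: "field is required".to_string(),
    })
}

fn main() {
    let t = ProtoTimestamp { seconds: 50, nanos: 7 };
    assert!(required_timestamp(Some(t), "time_of_first_write").is_ok());
    assert!(required_timestamp(None, "time_of_first_write").is_err());
    assert_eq!(timestamp(None, "time_of_last_access").unwrap(), None);
}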
@@ -188,7 +184,6 @@ mod test {
             lifecycle_action: management::ChunkLifecycleAction::Compacting.into(),
             time_of_first_write: Some(now.into()),
             time_of_last_write: Some(now.into()),
-            time_closed: None,
             time_of_last_access: Some(pbjson_types::Timestamp {
                 seconds: 50,
                 nanos: 7,

@@ -208,7 +203,6 @@ mod test {
             lifecycle_action: Some(ChunkLifecycleAction::Compacting),
             time_of_first_write: now,
             time_of_last_write: now,
-            time_closed: None,
             time_of_last_access: Some(Utc.timestamp_nanos(50_000_000_007)),
             order: ChunkOrder::new(5).unwrap(),
         };

@@ -234,7 +228,6 @@ mod test {
             lifecycle_action: Some(ChunkLifecycleAction::Persisting),
             time_of_first_write: now,
             time_of_last_write: now,
-            time_closed: None,
             time_of_last_access: Some(Utc.timestamp_nanos(12_000_100_007)),
             order: ChunkOrder::new(5).unwrap(),
         };

@@ -252,7 +245,6 @@ mod test {
             lifecycle_action: management::ChunkLifecycleAction::Persisting.into(),
             time_of_first_write: Some(now.into()),
             time_of_last_write: Some(now.into()),
-            time_closed: None,
             time_of_last_access: Some(pbjson_types::Timestamp {
                 seconds: 12,
                 nanos: 100_007,
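These literals construct orders with `ChunkOrder::new(5).unwrap()`, and the `TryFrom` conversion above rejects a zero `order` with an "Order must be non-zero" violation. A minimal sketch of such a non-zero newtype, assuming a wrapper over `NonZeroU32` (the real `ChunkOrder` may differ in detail):

use std::num::NonZeroU32;

/// Illustrative stand-in for the catalog's ChunkOrder newtype (not the real type).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct ChunkOrder(NonZeroU32);

impl ChunkOrder {
    /// Returns `None` for zero, mirroring the "Order must be non-zero" violation.
    fn new(order: u32) -> Option<Self> {
        NonZeroU32::new(order).map(Self)
    }

    fn get(&self) -> u32 {
        self.0.get()
    }
}

fn main() {
    assert!(ChunkOrder::new(0).is_none());
    assert_eq!(ChunkOrder::new(5).unwrap().get(), 5);
}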
@@ -2552,8 +2552,6 @@ mod tests {
         assert!(start < chunk.time_of_first_write());
         assert!(chunk.time_of_first_write() < after_data_load);
         assert!(chunk.time_of_first_write() == chunk.time_of_last_write());
-        assert!(after_data_load < chunk.time_closed().unwrap());
-        assert!(chunk.time_closed().unwrap() < after_rollover);
     }
 
     #[tokio::test]

@@ -2615,7 +2613,6 @@ mod tests {
             time_of_last_access: None,
             time_of_first_write: Utc.timestamp_nanos(1),
             time_of_last_write: Utc.timestamp_nanos(1),
-            time_closed: None,
             order: ChunkOrder::new(5).unwrap(),
         }];
 

@@ -2648,9 +2645,7 @@ mod tests {
         let t_second_write = Utc::now();
         write_lp_with_time(&db, "cpu bar=2 2", t_second_write).await;
 
-        let t_close_before = Utc::now();
         db.rollover_partition("cpu", "1970-01-01T00").await.unwrap();
-        let t_close_after = Utc::now();
 
         let mut chunk_summaries = db.chunk_summaries().unwrap();
 

@@ -2659,8 +2654,6 @@ mod tests {
         let summary = &chunk_summaries[0];
         assert_eq!(summary.time_of_first_write, t_first_write);
         assert_eq!(summary.time_of_last_write, t_second_write);
-        assert!(t_close_before <= summary.time_closed.unwrap());
-        assert!(summary.time_closed.unwrap() <= t_close_after);
     }
 
     fn assert_first_last_times_eq(chunk_summary: &ChunkSummary, expected: DateTime<Utc>) {
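The deleted assertions relied on a simple bracketing idiom: read `Utc::now()` just before and just after the operation, then require the timestamp recorded by the operation to fall between the two readings. A self-contained sketch of the idiom, with a hypothetical `close_chunk` standing in for the rollover path:

use chrono::{DateTime, Utc};

/// Hypothetical operation that records "now" internally, standing in for the
/// chunk close/rollover path whose timestamp the old test checked.
fn close_chunk() -> DateTime<Utc> {
    Utc::now()
}

fn main() {
    let t_close_before = Utc::now();
    let closed_at = close_chunk();
    let t_close_after = Utc::now();

    // The recorded time must lie within the bracket taken around the call.
    assert!(t_close_before <= closed_at);
    assert!(closed_at <= t_close_after);
}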
@@ -2853,7 +2846,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(1),
                 time_of_last_write: Utc.timestamp_nanos(1),
-                time_closed: None,
             },
             ChunkSummary {
                 partition_key: Arc::from("1970-01-05T15"),

@@ -2868,7 +2860,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(1),
                 time_of_last_write: Utc.timestamp_nanos(1),
-                time_closed: None,
             },
             ChunkSummary {
                 partition_key: Arc::from("1970-01-05T15"),

@@ -2883,7 +2874,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(1),
                 time_of_last_write: Utc.timestamp_nanos(1),
-                time_closed: None,
             },
         ];
 
@@ -220,10 +220,6 @@ pub struct CatalogChunk {
     /// that data was originally written into
     time_of_last_write: DateTime<Utc>,
 
-    /// Time at which this chunk was marked as closed. Note this is
-    /// not the same as the timestamps on the data itself
-    time_closed: Option<DateTime<Utc>>,
-
     /// Order of this chunk relative to other overlapping chunks.
     order: ChunkOrder,
 }

@@ -293,7 +289,6 @@ impl CatalogChunk {
             access_recorder: Default::default(),
             time_of_first_write: time_of_write,
             time_of_last_write: time_of_write,
-            time_closed: None,
             order,
         };
         chunk.update_metrics();

@@ -331,7 +326,6 @@ impl CatalogChunk {
             access_recorder: Default::default(),
             time_of_first_write,
             time_of_last_write,
-            time_closed: None,
             order,
         };
         chunk.update_metrics();

@@ -372,7 +366,6 @@ impl CatalogChunk {
             access_recorder: Default::default(),
             time_of_first_write,
             time_of_last_write,
-            time_closed: None,
             order,
         };
         chunk.update_metrics();

@@ -422,10 +415,6 @@ impl CatalogChunk {
         self.time_of_last_write
     }
 
-    pub fn time_closed(&self) -> Option<DateTime<Utc>> {
-        self.time_closed
-    }
-
     pub fn order(&self) -> ChunkOrder {
         self.order
     }

@@ -590,7 +579,6 @@ impl CatalogChunk {
             time_of_last_access,
             time_of_first_write: self.time_of_first_write,
             time_of_last_write: self.time_of_last_write,
-            time_closed: self.time_closed,
             order: self.order,
         }
     }

@@ -693,9 +681,6 @@ impl CatalogChunk {
         match &self.stage {
             ChunkStage::Open { mb_chunk, .. } => {
                 debug!(%self.addr, row_count=mb_chunk.rows(), "freezing chunk");
-                assert!(self.time_closed.is_none());
-
-                self.time_closed = Some(Utc::now());
                 let (s, _) = mb_chunk.snapshot();
 
                 // Cache table summary + schema
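Before this change, freezing an open chunk also stamped `time_closed = Some(Utc::now())`; afterwards freezing is purely a stage transition. A toy sketch of that Open → Frozen transition; the enum here is a simplified stand-in, not the real `ChunkStage`, which also carries the mutable-buffer snapshot and other state:

/// Simplified stand-in for the catalog chunk's lifecycle stage.
#[derive(Debug)]
enum ChunkStage {
    Open { rows: usize },
    Frozen { rows: usize },
}

#[derive(Debug)]
struct Chunk {
    stage: ChunkStage,
}

impl Chunk {
    /// Freeze the chunk: only legal from the Open stage; after this change the
    /// transition no longer records a close timestamp.
    fn freeze(&mut self) {
        let rows = match &self.stage {
            ChunkStage::Open { rows } => *rows,
            other => panic!("can only freeze an open chunk, got {:?}", other),
        };
        self.stage = ChunkStage::Frozen { rows };
    }
}

fn main() {
    let mut chunk = Chunk {
        stage: ChunkStage::Open { rows: 11 },
    };
    chunk.freeze();
    assert!(matches!(chunk.stage, ChunkStage::Frozen { .. }));
}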
@@ -946,10 +931,8 @@ mod tests {
         let mut chunk = make_open_chunk();
         let registration = TaskRegistration::new();
 
-        assert!(chunk.time_closed.is_none());
         assert!(matches!(chunk.stage, ChunkStage::Open { .. }));
         chunk.set_compacting(&registration).unwrap();
-        assert!(chunk.time_closed.is_some());
         assert!(matches!(chunk.stage, ChunkStage::Frozen { .. }));
     }
 
@@ -49,8 +49,7 @@ fn chunk_summaries_schema() -> SchemaRef {
         Field::new("row_count", DataType::UInt64, false),
         Field::new("time_of_last_access", ts.clone(), true),
         Field::new("time_of_first_write", ts.clone(), false),
-        Field::new("time_of_last_write", ts.clone(), false),
-        Field::new("time_closed", ts, true),
+        Field::new("time_of_last_write", ts, false),
         Field::new("order", DataType::UInt32, false),
     ]))
 }

@@ -112,11 +111,6 @@ fn from_chunk_summaries(schema: SchemaRef, chunks: Vec<ChunkSummary>) -> Result<
         .map(|c| c.time_of_last_write)
         .map(time_to_ts)
         .collect::<TimestampNanosecondArray>();
-    let time_closed = chunks
-        .iter()
-        .map(|c| c.time_closed)
-        .map(optional_time_to_ts)
-        .collect::<TimestampNanosecondArray>();
     let order = chunks
         .iter()
         .map(|c| Some(c.order.get()))

@@ -136,7 +130,6 @@ fn from_chunk_summaries(schema: SchemaRef, chunks: Vec<ChunkSummary>) -> Result<
             Arc::new(time_of_last_access),
             Arc::new(time_of_first_write),
             Arc::new(time_of_last_write),
-            Arc::new(time_closed),
             Arc::new(order),
         ],
     )
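`time_to_ts` and `optional_time_to_ts` convert the chunk's chrono timestamps into the nanosecond values that back the Arrow timestamp columns (nullable for `time_of_last_access`, previously also for `time_closed`). A rough sketch of what such helpers look like, with the Arrow array construction omitted; the bodies are an assumption based on the names and call sites, not the file's actual code:

use chrono::{DateTime, TimeZone, Utc};

/// Required column: every row produces Some(nanoseconds).
fn time_to_ts(time: DateTime<Utc>) -> Option<i64> {
    Some(time.timestamp_nanos())
}

/// Nullable column: a missing timestamp stays None and becomes a null cell.
fn optional_time_to_ts(time: Option<DateTime<Utc>>) -> Option<i64> {
    time.and_then(time_to_ts)
}

fn main() {
    let written = Utc.timestamp_nanos(10_000_000_000);
    assert_eq!(time_to_ts(written), Some(10_000_000_000));
    assert_eq!(optional_time_to_ts(None), None);
}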
@@ -164,7 +157,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(10_000_000_000),
                 time_of_last_write: Utc.timestamp_nanos(10_000_000_000),
-                time_closed: None,
                 order: ChunkOrder::new(5).unwrap(),
             },
             ChunkSummary {

@@ -179,7 +171,6 @@ mod tests {
                 time_of_last_access: Some(Utc.timestamp_nanos(754_000_000_000)),
                 time_of_first_write: Utc.timestamp_nanos(80_000_000_000),
                 time_of_last_write: Utc.timestamp_nanos(80_000_000_000),
-                time_closed: None,
                 order: ChunkOrder::new(6).unwrap(),
             },
             ChunkSummary {

@@ -194,19 +185,18 @@ mod tests {
                 time_of_last_access: Some(Utc.timestamp_nanos(5_000_000_000)),
                 time_of_first_write: Utc.timestamp_nanos(100_000_000_000),
                 time_of_last_write: Utc.timestamp_nanos(200_000_000_000),
-                time_closed: None,
                 order: ChunkOrder::new(7).unwrap(),
             },
         ];
 
         let expected = vec![
-            "+--------------------------------------+---------------+------------+-------------------+------------------------------+--------------+--------------------+-----------+----------------------+----------------------+----------------------+-------------+-------+",
-            "| id | partition_key | table_name | storage | lifecycle_action | memory_bytes | object_store_bytes | row_count | time_of_last_access | time_of_first_write | time_of_last_write | time_closed | order |",
-            "+--------------------------------------+---------------+------------+-------------------+------------------------------+--------------+--------------------+-----------+----------------------+----------------------+----------------------+-------------+-------+",
-            "| 00000000-0000-0000-0000-000000000000 | p1 | table1 | OpenMutableBuffer | | 23754 | | 11 | | 1970-01-01T00:00:10Z | 1970-01-01T00:00:10Z | | 5 |",
-            "| 00000000-0000-0000-0000-000000000001 | p1 | table1 | OpenMutableBuffer | Persisting to Object Storage | 23455 | | 22 | 1970-01-01T00:12:34Z | 1970-01-01T00:01:20Z | 1970-01-01T00:01:20Z | | 6 |",
-            "| 00000000-0000-0000-0000-000000000002 | p1 | table1 | ObjectStoreOnly | | 1234 | 5678 | 33 | 1970-01-01T00:00:05Z | 1970-01-01T00:01:40Z | 1970-01-01T00:03:20Z | | 7 |",
-            "+--------------------------------------+---------------+------------+-------------------+------------------------------+--------------+--------------------+-----------+----------------------+----------------------+----------------------+-------------+-------+",
+            "+--------------------------------------+---------------+------------+-------------------+------------------------------+--------------+--------------------+-----------+----------------------+----------------------+----------------------+-------+",
+            "| id | partition_key | table_name | storage | lifecycle_action | memory_bytes | object_store_bytes | row_count | time_of_last_access | time_of_first_write | time_of_last_write | order |",
+            "+--------------------------------------+---------------+------------+-------------------+------------------------------+--------------+--------------------+-----------+----------------------+----------------------+----------------------+-------+",
+            "| 00000000-0000-0000-0000-000000000000 | p1 | table1 | OpenMutableBuffer | | 23754 | | 11 | | 1970-01-01T00:00:10Z | 1970-01-01T00:00:10Z | 5 |",
+            "| 00000000-0000-0000-0000-000000000001 | p1 | table1 | OpenMutableBuffer | Persisting to Object Storage | 23455 | | 22 | 1970-01-01T00:12:34Z | 1970-01-01T00:01:20Z | 1970-01-01T00:01:20Z | 6 |",
+            "| 00000000-0000-0000-0000-000000000002 | p1 | table1 | ObjectStoreOnly | | 1234 | 5678 | 33 | 1970-01-01T00:00:05Z | 1970-01-01T00:01:40Z | 1970-01-01T00:03:20Z | 7 |",
+            "+--------------------------------------+---------------+------------+-------------------+------------------------------+--------------+--------------------+-----------+----------------------+----------------------+----------------------+-------+",
         ];
 
         let schema = chunk_summaries_schema();
@@ -320,7 +320,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(1),
                 time_of_last_write: Utc.timestamp_nanos(2),
-                time_closed: None,
                 order: ChunkOrder::new(5).unwrap(),
             },
             columns: vec![

@@ -357,7 +356,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(1),
                 time_of_last_write: Utc.timestamp_nanos(2),
-                time_closed: None,
                 order: ChunkOrder::new(6).unwrap(),
             },
             columns: vec![ChunkColumnSummary {

@@ -388,7 +386,6 @@ mod tests {
                 time_of_last_access: None,
                 time_of_first_write: Utc.timestamp_nanos(1),
                 time_of_last_write: Utc.timestamp_nanos(2),
-                time_closed: None,
                 order: ChunkOrder::new(5).unwrap(),
             },
             columns: vec![ChunkColumnSummary {
@@ -511,9 +511,7 @@ async fn test_chunk_get() {
 
     // make sure there were timestamps prior to normalization
     assert!(
-        chunks[0].time_of_first_write.is_some()
-            && chunks[0].time_of_last_write.is_some()
-            && chunks[0].time_closed.is_none(), // chunk is not yet closed
+        chunks[0].time_of_first_write.is_some() && chunks[0].time_of_last_write.is_some(), // chunk is not yet closed
         "actual:{:#?}",
        chunks[0]
     );

@@ -535,7 +533,6 @@ async fn test_chunk_get() {
            time_of_last_access: None,
            time_of_first_write: None,
            time_of_last_write: None,
-           time_closed: None,
            order: 1,
        },
        Chunk {

@@ -550,7 +547,6 @@ async fn test_chunk_get() {
            time_of_last_access: None,
            time_of_first_write: None,
            time_of_last_write: None,
-           time_closed: None,
            order: 1,
        },
    ];

@@ -722,7 +718,6 @@ async fn test_list_partition_chunks() {
        time_of_last_access: None,
        time_of_first_write: None,
        time_of_last_write: None,
-       time_closed: None,
        order: 1,
    }];
 

@@ -1069,7 +1064,6 @@ fn normalize_chunks(chunks: Vec<Chunk>) -> Vec<Chunk> {
            time_of_last_access: None,
            time_of_first_write: None,
            time_of_last_write: None,
-           time_closed: None,
            memory_bytes,
            object_store_bytes,
            order,
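`normalize_chunks` lets the end-to-end test compare against fixed expected values: wall-clock dependent fields are blanked out before the comparison, which is why the expected `Chunk` literals above carry `None` for every timestamp. A small sketch of that normalization idea on a made-up struct (not the real gRPC `Chunk` message):

/// Made-up stand-in for the management API's Chunk message.
#[derive(Debug, PartialEq)]
struct Chunk {
    id: u32,
    time_of_first_write: Option<i64>,
    time_of_last_write: Option<i64>,
    order: u32,
}

/// Blank out non-deterministic, wall-clock dependent fields so the remaining,
/// stable fields can be compared against a hard-coded expectation.
fn normalize_chunks(chunks: Vec<Chunk>) -> Vec<Chunk> {
    chunks
        .into_iter()
        .map(|c| Chunk {
            time_of_first_write: None,
            time_of_last_write: None,
            ..c
        })
        .collect()
}

fn main() {
    let actual = vec![Chunk {
        id: 42,
        time_of_first_write: Some(1_632_000_000),
        time_of_last_write: Some(1_632_000_001),
        order: 1,
    }];
    let expected = vec![Chunk {
        id: 42,
        time_of_first_write: None,
        time_of_last_write: None,
        order: 1,
    }];
    assert_eq!(normalize_chunks(actual), expected);
}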