From 6b907f94da22027ccd1abb33f9195f6652b3da20 Mon Sep 17 00:00:00 2001
From: Marco Neumann
Date: Mon, 16 Aug 2021 15:31:05 +0200
Subject: [PATCH] test: simplify `fixture_replay_broken`

---
 tests/end_to_end_cases/scenario.rs | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)

diff --git a/tests/end_to_end_cases/scenario.rs b/tests/end_to_end_cases/scenario.rs
index 8876d54573..9964002ffa 100644
--- a/tests/end_to_end_cases/scenario.rs
+++ b/tests/end_to_end_cases/scenario.rs
@@ -1,5 +1,5 @@
 use std::iter::once;
-use std::time::{Duration, Instant};
+use std::time::Duration;
 use std::{convert::TryInto, str, u32};
 use std::{sync::Arc, time::SystemTime};
 
@@ -705,28 +705,14 @@ pub async fn fixture_replay_broken(db_name: &str, kafka_connection: &str) -> Ser
         .await
         .unwrap();
 
-    // wait for ingest
-    let t_0 = Instant::now();
-    loop {
-        // use later partition here so that we can implicitely wait for both entries
-        if fixture
-            .management_client()
-            .get_partition(db_name, "partition_by_b")
-            .await
-            .is_ok()
-        {
-            break;
-        }
-
-        assert!(t_0.elapsed() < Duration::from_secs(10));
-        tokio::time::sleep(Duration::from_millis(100)).await;
-    }
-
+    // wait for ingest, compaction and persistence
     wait_for_exact_chunk_states(
         &fixture,
         db_name,
         vec![
+            // that's the single entry from partition a
             ChunkStorage::ReadBuffer,
+            // these are the two entries from partition b that got persisted due to the row limit
             ChunkStorage::ReadBufferAndObjectStore,
         ],
         Duration::from_secs(10),