test: Make test outputs consistent with the ones we send back to client

pull/24376/head
Nga Tran 2021-10-22 12:19:30 -04:00
parent c0ef7e7225
commit f9dedd78da
2 changed files with 56 additions and 73 deletions

@@ -1,12 +1,8 @@
 use arrow::datatypes::DataType;
-use arrow_util::assert_batches_eq;
 use datafusion::logical_plan::{col, lit};
 use predicate::predicate::{Predicate, PredicateBuilder};
 use query::{
-    exec::{
-        fieldlist::{Field, FieldList},
-        ExecutorType,
-    },
+    exec::fieldlist::{Field, FieldList},
     frontend::influxrpc::InfluxRpcPlanner,
 };
@@ -153,82 +149,67 @@ async fn test_field_columns_with_ts_pred() {
 #[tokio::test]
 async fn test_field_name_plan() {
     test_helpers::maybe_start_logging();
     // Tests that the ordering that comes out is reasonable
-    let scenarios = OneMeasurementManyFields {}.make().await;
-    for scenario in scenarios {
-        let predicate = PredicateBuilder::default().timestamp_range(0, 200).build();
+    let predicate = PredicateBuilder::default().timestamp_range(0, 2000).build();
-        let DbScenario {
-            scenario_name, db, ..
-        } = scenario;
-        println!("Running scenario '{}'", scenario_name);
-        println!("Predicate: '{:#?}'", predicate);
-        let planner = InfluxRpcPlanner::new();
-        let ctx = db.executor().new_context(ExecutorType::Query);
+    let expected_fields = FieldList {
+        fields: vec![
+            Field {
+                name: "field1".into(),
+                data_type: DataType::Float64,
+                last_timestamp: 1000,
+            },
+            Field {
+                name: "field2".into(),
+                data_type: DataType::Utf8,
+                last_timestamp: 1000, // Need to verify with alamb if 1000 is the right one. It looks to me it should be 100
+            },
+            Field {
+                name: "field3".into(),
+                data_type: DataType::Float64,
+                last_timestamp: 100,
+            },
+            Field {
+                name: "field4".into(),
+                data_type: DataType::Boolean,
+                last_timestamp: 1000,
+            },
+        ],
+    };
-        let plan = planner
-            .field_columns(db.as_ref(), predicate.clone())
-            .expect("built plan successfully");
-        let mut plans = plan.plans;
-        let plan = plans.pop().unwrap();
-        assert!(plans.is_empty()); // only one plan
-        // run the created plan directly, ensuring the output is as
-        // expected (specifically that the column ordering is correct)
-        let results = ctx.run_logical_plan(plan).await.expect("ok running plan");
-        let expected = vec![
-            "+--------+--------+--------+--------+--------------------------------+",
-            "| field1 | field2 | field3 | field4 | time                           |",
-            "+--------+--------+--------+--------+--------------------------------+",
-            "| 70.5   | ss     | 2      |        | 1970-01-01T00:00:00.000000100Z |",
-            "+--------+--------+--------+--------+--------------------------------+",
-        ];
-        assert_batches_eq!(expected, &results);
-    }
+    run_field_columns_test_case(OneMeasurementManyFields {}, predicate, expected_fields).await;
 }
 
 #[tokio::test]
 async fn test_field_name_plan_with_delete() {
     test_helpers::maybe_start_logging();
     // Tests that the ordering that comes out is reasonable
-    let scenarios = OneMeasurementManyFieldsWithDelete {}.make().await;
-    for scenario in scenarios {
-        let predicate = PredicateBuilder::default().timestamp_range(0, 200).build();
+    let predicate = PredicateBuilder::default().timestamp_range(0, 2000).build();
-        let DbScenario {
-            scenario_name, db, ..
-        } = scenario;
-        println!("Running scenario '{}'", scenario_name);
-        println!("Predicate: '{:#?}'", predicate);
-        let planner = InfluxRpcPlanner::new();
-        let ctx = db.executor().new_context(ExecutorType::Query);
+    let expected_fields = FieldList {
+        fields: vec![
+            Field {
+                name: "field1".into(),
+                data_type: DataType::Float64,
+                last_timestamp: 100,
+            },
+            Field {
+                name: "field2".into(),
+                data_type: DataType::Utf8,
+                last_timestamp: 100,
+            },
+            Field {
+                name: "field3".into(),
+                data_type: DataType::Float64,
+                last_timestamp: 100,
+            },
+        ],
+    };
-        let plan = planner
-            .field_columns(db.as_ref(), predicate.clone())
-            .expect("built plan successfully");
-        let mut plans = plan.plans;
-        let plan = plans.pop().unwrap();
-        assert!(plans.is_empty()); // only one plan
-        // run the created plan directly, ensuring the output is as
-        // expected (specifically that the column ordering is correct)
-        let results = ctx.run_logical_plan(plan).await.expect("ok running plan");
-        // Todo: After the panic bug is fixed, this result should be recheck. I think column field4 will disappear from the result
-        let expected = vec![
-            "+--------+--------+--------+--------+--------------------------------+",
-            "| field1 | field2 | field3 | field4 | time                           |",
-            "+--------+--------+--------+--------+--------------------------------+",
-            "| 70.5   | ss     | 2      |        | 1970-01-01T00:00:00.000000100Z |",
-            "+--------+--------+--------+--------+--------------------------------+",
-        ];
-        assert_batches_eq!(expected, &results);
-    }
+    run_field_columns_test_case(
+        OneMeasurementManyFieldsWithDelete {},
+        predicate,
+        expected_fields,
+    )
+    .await;
 }
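
Both tests now delegate to a shared run_field_columns_test_case helper, which is why the file drops its assert_batches_eq and ExecutorType imports. The helper's definition is outside this diff; the sketch below is a plausible reconstruction from the inlined bodies it replaces, assuming the execution context exposes a to_field_list conversion (the name and exact signature are assumptions, not confirmed by this commit):

// Plausible reconstruction of the shared helper (not part of this diff).
// It folds the per-scenario boilerplate both tests previously inlined:
// build the field_columns plan, execute it, and compare the resulting
// FieldList against the expected one.
async fn run_field_columns_test_case<D>(
    db_setup: D,
    predicate: Predicate,
    expected_fields: FieldList,
) where
    D: DbSetup,
{
    test_helpers::maybe_start_logging();

    for scenario in db_setup.make().await {
        let DbScenario {
            scenario_name, db, ..
        } = scenario;
        println!("Running scenario '{}'", scenario_name);
        println!("Predicate: '{:#?}'", predicate);

        let planner = InfluxRpcPlanner::new();
        let ctx = db.executor().new_context(ExecutorType::Query);

        let plan = planner
            .field_columns(db.as_ref(), predicate.clone())
            .expect("built plan successfully");

        // Convert the plan output into the same FieldList the server sends
        // back to the client (assumes a context method like `to_field_list`).
        let fields = ctx
            .to_field_list(plan)
            .await
            .expect("converted plan to field list");

        assert_eq!(
            fields, expected_fields,
            "Unexpected fields in scenario '{}'",
            scenario_name
        );
    }
}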

@@ -843,6 +843,7 @@ impl DbSetup for OneMeasurementManyFields {
             "h2o,tag1=foo,tag2=bar field1=70.4,field2=\"ss\" 100",
             "h2o,tag1=foo,tag2=bar field1=70.5,field2=\"ss\" 100",
             "h2o,tag1=foo,tag2=bar field1=70.6,field4=true 1000",
+            "h2o,tag1=foo,tag2=bar field1=70.3,field5=false 3000",
         ];
 
         all_scenarios_for_one_chunk(vec![], vec![], lp_lines, "h2o", partition_key).await
@@ -863,6 +864,7 @@ impl DbSetup for OneMeasurementManyFieldsWithDelete {
             "h2o,tag1=foo,tag2=bar field1=70.4,field2=\"ss\" 100",
             "h2o,tag1=foo,tag2=bar field1=70.5,field2=\"ss\" 100",
             "h2o,tag1=foo,tag2=bar field1=70.6,field4=true 1000",
+            "h2o,tag1=foo,tag2=bar field1=70.3,field5=false 3000",
         ];
 
         // pred: delete from h2o where 1000 <= time <= 1000
@@ -872,7 +874,7 @@ impl DbSetup for OneMeasurementManyFieldsWithDelete {
         let pred = DeletePredicate {
            range: TimestampRange {
                start: 1000,
-               end: 3000,
+               end: 1100,
            },
            exprs: vec![],
        };
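
Read together with the new timestamp_range(0, 2000) query predicate in the tests above, these two changes explain the expected FieldLists. A rough walk-through, assuming the query range excludes ts 3000 and the delete range [1000, 1100] is inclusive, as the comment above it suggests:

- The ts-100 rows (field1, field2, field3) fall inside the query range and outside the delete range, so they appear in both tests' expected fields.
- The ts-1000 row (field1=70.6, field4=true) is inside the query range but removed by the delete, so field4 shows up only in the test without deletes.
- The new ts-3000 row (field1=70.3, field5=false) sits outside the query range, so field5 appears in neither expected FieldList; it presumably exists to exercise the predicate filtering that the outputs sent back to the client go through.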