feat: Allow sql test runner to compare sorted output (#3618)

* refactor: Add Query struct

* feat: Implement sorted checking

* refactor: port some sql tests over

* fix: fmt

* fix: Apply suggestions from code review

Co-authored-by: Edd Robinson <me@edd.io>

Co-authored-by: Edd Robinson <me@edd.io>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
pull/24376/head
Andrew Lamb 2022-02-02 14:59:52 -05:00 committed by GitHub
parent 429d59f1b6
commit c4a234e83c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 535 additions and 243 deletions

View File

@ -16,16 +16,19 @@ The tests in `src/runner` are driven somewhat more dynamically based on input fi
How do you make a new test:
1. Add a new file .sql to the `cases/in` directory
2. Regenerate file: `(cd generate && cargo run)`
3. Run the tests: `cargo test -p query_tests`
4. You will get a failure message that contains examples of how to update the files
## Example output
```
Possibly helpful commands:
# See diff
diff -du "/Users/alamb/Software/influxdb_iox/query_tests/cases/in/pushdown.expected" "/Users/alamb/Software/influxdb_iox/query_tests/cases/out/pushdown.out"
# Update expected
cp -f "/Users/alamb/Software/influxdb_iox/query_tests/cases/out/pushdown.out" "/Users/alamb/Software/influxdb_iox/query_tests/cases/in/pushdown.expected"
```
# Cookbook: Adding a new test scenario

View File

@ -0,0 +1,46 @@
-- Test Setup: TwoMeasurementsManyFields
-- SQL: SELECT * from information_schema.tables;
-- Results After Sorting
+---------------+--------------------+---------------------+------------+
| table_catalog | table_schema | table_name | table_type |
+---------------+--------------------+---------------------+------------+
| public | information_schema | columns | VIEW |
| public | information_schema | tables | VIEW |
| public | iox | h2o | BASE TABLE |
| public | iox | o2 | BASE TABLE |
| public | system | chunk_columns | BASE TABLE |
| public | system | chunks | BASE TABLE |
| public | system | columns | BASE TABLE |
| public | system | operations | BASE TABLE |
| public | system | persistence_windows | BASE TABLE |
| public | system | queries | BASE TABLE |
+---------------+--------------------+---------------------+------------+
-- SQL: SELECT * from information_schema.columns where table_name = 'h2o' OR table_name = 'o2';
-- Results After Sorting
+---------------+--------------+------------+-------------+------------------+----------------+-------------+-----------------------------+--------------------------+------------------------+-------------------+-------------------------+---------------+--------------------+---------------+
| table_catalog | table_schema | table_name | column_name | ordinal_position | column_default | is_nullable | data_type | character_maximum_length | character_octet_length | numeric_precision | numeric_precision_radix | numeric_scale | datetime_precision | interval_type |
+---------------+--------------+------------+-------------+------------------+----------------+-------------+-----------------------------+--------------------------+------------------------+-------------------+-------------------------+---------------+--------------------+---------------+
| public | iox | h2o | city | 0 | | YES | Dictionary(Int32, Utf8) | | | | | | | |
| public | iox | h2o | moisture | 1 | | YES | Float64 | | | 24 | 2 | | | |
| public | iox | h2o | other_temp | 2 | | YES | Float64 | | | 24 | 2 | | | |
| public | iox | h2o | state | 3 | | YES | Dictionary(Int32, Utf8) | | | | | | | |
| public | iox | h2o | temp | 4 | | YES | Float64 | | | 24 | 2 | | | |
| public | iox | h2o | time | 5 | | NO | Timestamp(Nanosecond, None) | | | | | | | |
| public | iox | o2 | city | 0 | | YES | Dictionary(Int32, Utf8) | | | | | | | |
| public | iox | o2 | reading | 1 | | YES | Float64 | | | 24 | 2 | | | |
| public | iox | o2 | state | 2 | | YES | Dictionary(Int32, Utf8) | | | | | | | |
| public | iox | o2 | temp | 3 | | YES | Float64 | | | 24 | 2 | | | |
| public | iox | o2 | time | 4 | | NO | Timestamp(Nanosecond, None) | | | | | | | |
+---------------+--------------+------------+-------------+------------------+----------------+-------------+-----------------------------+--------------------------+------------------------+-------------------+-------------------------+---------------+--------------------+---------------+
-- SQL: SHOW COLUMNS FROM h2o;
-- Results After Sorting
+---------------+--------------+------------+-------------+-----------------------------+-------------+
| table_catalog | table_schema | table_name | column_name | data_type | is_nullable |
+---------------+--------------+------------+-------------+-----------------------------+-------------+
| public | iox | h2o | city | Dictionary(Int32, Utf8) | YES |
| public | iox | h2o | moisture | Float64 | YES |
| public | iox | h2o | other_temp | Float64 | YES |
| public | iox | h2o | state | Dictionary(Int32, Utf8) | YES |
| public | iox | h2o | temp | Float64 | YES |
| public | iox | h2o | time | Timestamp(Nanosecond, None) | NO |
+---------------+--------------+------------+-------------+-----------------------------+-------------+

View File

@ -0,0 +1,13 @@
-- IOX_SETUP: TwoMeasurementsManyFields
-- validate we have access to information schema for listing table names
-- IOX_COMPARE: sorted
SELECT * from information_schema.tables;
-- validate we have access to information schema for listing columns names/types
-- IOX_COMPARE: sorted
SELECT * from information_schema.columns where table_name = 'h2o' OR table_name = 'o2';
-- validate we have access to SHOW SCHEMA for listing columns names
-- IOX_COMPARE: sorted
SHOW COLUMNS FROM h2o;

View File

@ -0,0 +1,25 @@
-- Test Setup: TwoMeasurementsManyFieldsOneChunk
-- SQL: SELECT partition_key, table_name, storage, memory_bytes, row_count from system.chunks;
-- Results After Sorting
+---------------+------------+-------------------+--------------+-----------+
| partition_key | table_name | storage | memory_bytes | row_count |
+---------------+------------+-------------------+--------------+-----------+
| 1970-01-01T00 | h2o | OpenMutableBuffer | 1831 | 3 |
| 1970-01-01T00 | o2 | OpenMutableBuffer | 1827 | 2 |
+---------------+------------+-------------------+--------------+-----------+
-- SQL: SELECT * from system.columns;
-- Results After Sorting
+---------------+------------+-------------+-------------+---------------+
| partition_key | table_name | column_name | column_type | influxdb_type |
+---------------+------------+-------------+-------------+---------------+
| 1970-01-01T00 | h2o | city | String | Tag |
| 1970-01-01T00 | h2o | other_temp | F64 | Field |
| 1970-01-01T00 | h2o | state | String | Tag |
| 1970-01-01T00 | h2o | temp | F64 | Field |
| 1970-01-01T00 | h2o | time | I64 | Timestamp |
| 1970-01-01T00 | o2 | city | String | Tag |
| 1970-01-01T00 | o2 | reading | F64 | Field |
| 1970-01-01T00 | o2 | state | String | Tag |
| 1970-01-01T00 | o2 | temp | F64 | Field |
| 1970-01-01T00 | o2 | time | I64 | Timestamp |
+---------------+------------+-------------+-------------+---------------+

View File

@ -0,0 +1,16 @@
-- IOX_SETUP: TwoMeasurementsManyFieldsOneChunk
--
-- system tables reflect the state of chunks, so don't run them
-- with different chunk configurations.
--
-- ensures the tables / plumbing are hooked up (so no need to
-- test timestamps, etc)
-- IOX_COMPARE: sorted
SELECT partition_key, table_name, storage, memory_bytes, row_count from system.chunks;
-- IOX_COMPARE: sorted
SELECT * from system.columns;

View File

@ -0,0 +1,21 @@
-- Test Setup: TwoMeasurementsManyFieldsTwoChunks
-- SQL: SELECT partition_key, table_name, column_name, storage, row_count, null_count, min_value, max_value, memory_bytes from system.chunk_columns;
-- Results After Sorting
+---------------+------------+-------------+-------------------+-----------+------------+-----------+-----------+--------------+
| partition_key | table_name | column_name | storage | row_count | null_count | min_value | max_value | memory_bytes |
+---------------+------------+-------------+-------------------+-----------+------------+-----------+-----------+--------------+
| 1970-01-01T00 | h2o | city | OpenMutableBuffer | 1 | 0 | Boston | Boston | 309 |
| 1970-01-01T00 | h2o | city | ReadBuffer | 2 | 0 | Boston | Boston | 359 |
| 1970-01-01T00 | h2o | other_temp | OpenMutableBuffer | 1 | 0 | 72.4 | 72.4 | 297 |
| 1970-01-01T00 | h2o | other_temp | ReadBuffer | 2 | 1 | 70.4 | 70.4 | 471 |
| 1970-01-01T00 | h2o | state | OpenMutableBuffer | 1 | 0 | CA | CA | 309 |
| 1970-01-01T00 | h2o | state | ReadBuffer | 2 | 0 | MA | MA | 347 |
| 1970-01-01T00 | h2o | temp | ReadBuffer | 2 | 1 | 70.4 | 70.4 | 471 |
| 1970-01-01T00 | h2o | time | OpenMutableBuffer | 1 | 0 | 350 | 350 | 297 |
| 1970-01-01T00 | h2o | time | ReadBuffer | 2 | 0 | 50 | 250 | 110 |
| 1970-01-01T00 | o2 | city | OpenMutableBuffer | 2 | 1 | Boston | Boston | 309 |
| 1970-01-01T00 | o2 | reading | OpenMutableBuffer | 2 | 1 | 51 | 51 | 297 |
| 1970-01-01T00 | o2 | state | OpenMutableBuffer | 2 | 0 | CA | MA | 313 |
| 1970-01-01T00 | o2 | temp | OpenMutableBuffer | 2 | 0 | 53.4 | 79 | 297 |
| 1970-01-01T00 | o2 | time | OpenMutableBuffer | 2 | 0 | 50 | 300 | 297 |
+---------------+------------+-------------+-------------------+-----------+------------+-----------+-----------+--------------+

View File

@ -0,0 +1,12 @@
-- IOX_SETUP: TwoMeasurementsManyFieldsTwoChunks
--
-- system tables reflect the state of chunks, so don't run them
-- with different chunk configurations.
--
-- ensures the tables / plumbing are hooked up (so no need to
-- test timestamps, etc)
-- IOX_COMPARE: sorted
SELECT partition_key, table_name, column_name, storage, row_count, null_count, min_value, max_value, memory_bytes from system.chunk_columns;

View File

@ -158,6 +158,48 @@ async fn test_cases_pushdown_sql() {
.expect("flush worked");
}
#[tokio::test]
// Tests from "sql_information_schema.sql",
// NOTE(review): this test appears to be auto-generated from
// `cases/in/sql_information_schema.sql` (the cases README says to run
// `(cd generate && cargo run)`) — prefer regenerating over hand-editing.
async fn test_cases_sql_information_schema_sql() {
let input_path = Path::new("cases").join("in").join("sql_information_schema.sql");
let mut runner = Runner::new();
runner
.run(input_path)
.await
.expect("test failed");
runner
.flush()
.expect("flush worked");
}
#[tokio::test]
// Tests from "sql_system_tables.sql",
// NOTE(review): this test appears to be auto-generated from
// `cases/in/sql_system_tables.sql` (the cases README says to run
// `(cd generate && cargo run)`) — prefer regenerating over hand-editing.
async fn test_cases_sql_system_tables_sql() {
let input_path = Path::new("cases").join("in").join("sql_system_tables.sql");
let mut runner = Runner::new();
runner
.run(input_path)
.await
.expect("test failed");
runner
.flush()
.expect("flush worked");
}
#[tokio::test]
// Tests from "sql_system_tables2.sql",
// NOTE(review): this test appears to be auto-generated from
// `cases/in/sql_system_tables2.sql` (the cases README says to run
// `(cd generate && cargo run)`) — prefer regenerating over hand-editing.
async fn test_cases_sql_system_tables2_sql() {
let input_path = Path::new("cases").join("in").join("sql_system_tables2.sql");
let mut runner = Runner::new();
runner
.run(input_path)
.await
.expect("test failed");
runner
.flush()
.expect("flush worked");
}
#[tokio::test]
// Tests from "stats_plans.sql",
async fn test_cases_stats_plans_sql() {
@ -184,4 +226,4 @@ async fn test_cases_timestamps_sql() {
runner
.flush()
.expect("flush worked");
}
}

View File

@ -4,7 +4,7 @@ mod parse;
mod setup;
use arrow::record_batch::RecordBatch;
use arrow_util::display::pretty_format_batches;
use arrow_util::{display::pretty_format_batches, test_util::sort_record_batch};
use query::{
exec::{Executor, ExecutorType},
frontend::sql::SqlQueryPlanner,
@ -17,7 +17,10 @@ use std::{
sync::Arc,
};
use self::{parse::TestQueries, setup::TestSetup};
use self::{
parse::{Query, TestQueries},
setup::TestSetup,
};
use crate::scenarios::{DbScenario, DbSetup};
use query::exec::ExecutorConfig;
@ -196,7 +199,10 @@ impl<W: Write> Runner<W> {
let db_setup = test_setup.get_setup().context(SetupSnafu)?;
for q in queries.iter() {
output.push(format!("-- SQL: {}", q));
output.push(format!("-- SQL: {}", q.sql()));
if q.sorted_compare() {
output.push("-- Results After Sorting".into())
}
output.append(&mut self.run_query(q, db_setup.as_ref()).await?);
}
@ -259,7 +265,8 @@ impl<W: Write> Runner<W> {
///
/// Note this does not (yet) understand how to compare results
/// while ignoring output order
async fn run_query(&mut self, sql: &str, db_setup: &dyn DbSetup) -> Result<Vec<String>> {
async fn run_query(&mut self, query: &Query, db_setup: &dyn DbSetup) -> Result<Vec<String>> {
let sql = query.sql();
let mut previous_results = vec![];
for scenario in db_setup.make().await {
@ -284,7 +291,15 @@ impl<W: Write> Runner<W> {
.await
.expect("built plan successfully");
let results: Vec<RecordBatch> = ctx.collect(physical_plan).await.expect("Running plan");
let mut results: Vec<RecordBatch> =
ctx.collect(physical_plan).await.expect("Running plan");
// compare against sorted results, if requested
if query.sorted_compare() && !results.is_empty() {
let schema = results[0].schema();
let batch = RecordBatch::concat(&schema, &results).expect("concatenating batches");
results = vec![sort_record_batch(batch)];
}
let current_results = pretty_format_batches(&results)
.unwrap()
@ -346,15 +361,16 @@ mod test {
use super::*;
const TEST_INPUT: &str = r#"
#[tokio::test]
async fn runner_positive() {
let input = r#"
-- Runner test, positive
-- IOX_SETUP: TwoMeasurements
-- Only a single query
SELECT * from disk;
"#;
const EXPECTED_OUTPUT: &str = r#"-- Test Setup: TwoMeasurements
let expected = r#"-- Test Setup: TwoMeasurements
-- SQL: SELECT * from disk;
+-------+--------+--------------------------------+
| bytes | region | time |
@ -363,70 +379,189 @@ SELECT * from disk;
+-------+--------+--------------------------------+
"#;
#[tokio::test]
async fn runner_positive() {
let (_tmp_dir, input_file) = make_in_file(TEST_INPUT);
let output_path = make_output_path(&input_file).unwrap();
let expected_path = input_file.with_extension("expected");
// write expected output
std::fs::write(&expected_path, EXPECTED_OUTPUT).unwrap();
let mut runner = Runner::new_with_writer(vec![]);
let runner_results = runner.run(&input_file).await;
let results = run_case(input, expected).await;
// ensure that the generated output and expected output match
let output_contents = read_file(&output_path);
assert_eq!(output_contents, EXPECTED_OUTPUT);
assert_eq!(results.output_contents, expected);
// Test should have succeeded
runner_results.expect("successful run");
results.runner_result.expect("successful run");
// examine the output log and ensure it contains expected results
let runner_log = runner_to_log(runner);
assert_contains!(&runner_log, format!("writing output to {:?}", &output_path));
assert_contains!(
&runner_log,
format!("expected output in {:?}", &expected_path)
&results.runner_log,
format!("writing output to {:?}", &results.output_path)
);
assert_contains!(&runner_log, "Setup: TwoMeasurements");
assert_contains!(&runner_log, "Running scenario");
assert_contains!(
&results.runner_log,
format!("expected output in {:?}", &results.expected_path)
);
assert_contains!(&results.runner_log, "Setup: TwoMeasurements");
assert_contains!(&results.runner_log, "Running scenario");
}
#[tokio::test]
async fn runner_negative() {
let (_tmp_dir, input_file) = make_in_file(TEST_INPUT);
let output_path = make_output_path(&input_file).unwrap();
let expected_path = input_file.with_extension("expected");
let input = r#"
-- Runner test, positive
-- IOX_SETUP: TwoMeasurements
// write incorrect expected output
std::fs::write(&expected_path, "this is not correct").unwrap();
-- Only a single query
SELECT * from disk;
"#;
let expected = r#"-- Test Setup: TwoMeasurements
-- SQL: SELECT * from disk;
+-------+--------+--------------------------------+
| bytes | region | time |
+-------+--------+--------------------------------+
| 99 | east | 1970-01-01T00:00:00.000000200Z |
+-------+--------+--------------------------------+
"#;
let mut runner = Runner::new_with_writer(vec![]);
let runner_results = runner.run(&input_file).await;
let results = run_case(input, "this is not correct").await;
// ensure that the generated output and expected output match
let output_contents = read_file(&output_path);
assert_eq!(output_contents, EXPECTED_OUTPUT);
assert_eq!(results.output_contents, expected);
// Test should have failed
let err_string = runner_results.unwrap_err().to_string();
let err_string = results.runner_result.unwrap_err().to_string();
assert_contains!(
err_string,
format!(
"Contents of output '{:?}' does not match contents of expected '{:?}'",
&output_path, &expected_path
&results.output_path, &results.expected_path
)
);
// examine the output log and ensure it contains expected resouts
let runner_log = runner_to_log(runner);
assert_contains!(&runner_log, format!("writing output to {:?}", &output_path));
assert_contains!(
&runner_log,
format!("expected output in {:?}", &expected_path)
&results.runner_log,
format!("writing output to {:?}", &results.output_path)
);
assert_contains!(&runner_log, "Setup: TwoMeasurements");
assert_contains!(
&results.runner_log,
format!("expected output in {:?}", &results.expected_path)
);
assert_contains!(&results.runner_log, "Setup: TwoMeasurements");
}
/// Ensure differences in sort order produce output errors
#[tokio::test]
async fn runner_different_sorts_error() {
let input = r#"
-- Runner test, positive
-- IOX_SETUP: TwoMeasurements
-- Only a single query
SELECT * from cpu ORDER BY time DESC;
"#;
// Without an `-- IOX_COMPARE: sorted` annotation, the actual output
// must match the expected output in exact row order.
let expected = r#"-- Test Setup: TwoMeasurements
-- SQL: SELECT * from cpu ORDER BY time DESC;
+--------+--------------------------------+------+
| region | time | user |
+--------+--------------------------------+------+
| west | 1970-01-01T00:00:00.000000150Z | 21 |
| west | 1970-01-01T00:00:00.000000100Z | 23.2 |
+--------+--------------------------------+------+
"#;
let results = run_case(input, expected).await;
// ensure that the generated output and expected output match
assert_eq!(results.output_contents, expected);
results.runner_result.unwrap();
// now, however, if the results are in a different order
// expect an output mismatch
let expected = r#"-- Test Setup: TwoMeasurements
-- SQL: SELECT * from cpu ORDER BY time DESC;
+--------+--------------------------------+------+
| region | time | user |
+--------+--------------------------------+------+
| west | 1970-01-01T00:00:00.000000100Z | 23.2 |
| west | 1970-01-01T00:00:00.000000150Z | 21 |
+--------+--------------------------------+------+
"#;
let results = run_case(input, expected).await;
// the run must fail: same rows, different order, no sorted compare
results.runner_result.unwrap_err();
assert_contains!(
&results.runner_log,
"Expected output does not match actual output"
);
}
/// Ensure differences in sort order does NOT produce output error
#[tokio::test]
async fn runner_different_sorts_with_sorted_compare() {
// `-- IOX_COMPARE: sorted` makes the comparison order-insensitive and
// adds the "-- Results After Sorting" marker line to the output.
let input = r#"
-- Runner test, positive
-- IOX_SETUP: TwoMeasurements
-- IOX_COMPARE: sorted
SELECT * from cpu ORDER BY time DESC;
"#;
// note the output is not sorted `DESC` in time (it is ASC)
let expected = r#"-- Test Setup: TwoMeasurements
-- SQL: SELECT * from cpu ORDER BY time DESC;
-- Results After Sorting
+--------+--------------------------------+------+
| region | time | user |
+--------+--------------------------------+------+
| west | 1970-01-01T00:00:00.000000100Z | 23.2 |
| west | 1970-01-01T00:00:00.000000150Z | 21 |
+--------+--------------------------------+------+
"#;
let results = run_case(input, expected).await;
// ensure that the generated output and expected output match
assert_eq!(results.output_contents, expected);
results.runner_result.unwrap();
}
/// Result of running the test_input with an expected output
struct RunResult {
/// Result of running the test case
runner_result: Result<()>,
/// The path that expected file was located in
expected_path: PathBuf,
/// The output file that the runner actually produced
output_contents: String,
/// The path that the output file was written to
output_path: PathBuf,
/// The log the runner produced
runner_log: String,
}
/// Drives the runner end-to-end for one case: writes `test_input` to a
/// temporary input file, records `expected_output` as its `.expected`
/// file, runs the runner, and captures everything a test needs to
/// inspect (run result, paths, produced output, and the runner's log).
async fn run_case(test_input: &str, expected_output: &str) -> RunResult {
    // hold the TempDir guard so the input file outlives the run
    let (_tmp_dir, input_file) = make_in_file(test_input);
    let expected_path = input_file.with_extension("expected");
    let output_path = make_output_path(&input_file).unwrap();

    // seed the expected file before running
    std::fs::write(&expected_path, expected_output).unwrap();

    let mut runner = Runner::new_with_writer(vec![]);
    let runner_result = runner.run(&input_file).await;
    let output_contents = read_file(&output_path);

    RunResult {
        runner_result,
        expected_path,
        output_contents,
        output_path,
        runner_log: runner_to_log(runner),
    }
}
fn make_in_file<C: AsRef<[u8]>>(contents: C) -> (tempfile::TempDir, PathBuf) {

View File

@ -1,20 +1,80 @@
/// Poor man's parser to find all the SQL queries in the input
/// A single SQL statement to run, plus annotations controlling how its
/// results are compared against the expected output.
#[derive(Debug, PartialEq, Default)]
pub struct Query {
    /// If true, results are sorted first prior to comparison, meaning
    /// that differences in the output order compared with expected
    /// order do not cause a diff
    sorted_compare: bool,
    /// The SQL string
    sql: String,
}

impl Query {
    /// Creates a query with default annotations (no sorted compare).
    pub fn new(sql: impl Into<String>) -> Self {
        Self {
            sql: sql.into(),
            ..Self::default()
        }
    }

    /// Builder-style setter: enables sorted comparison for this query.
    pub fn with_sorted_compare(mut self) -> Self {
        self.sorted_compare = true;
        self
    }

    /// Get a reference to the query's sql.
    pub fn sql(&self) -> &str {
        &self.sql
    }

    /// Returns true if results should be sorted before comparison.
    pub fn sorted_compare(&self) -> bool {
        self.sorted_compare
    }
}

/// Incrementally accumulates the text and annotations of one [`Query`]
/// while an input file is scanned line by line.
#[derive(Debug, Default)]
struct QueryBuilder {
    query: Query,
}

impl QueryBuilder {
    fn new() -> Self {
        Self::default()
    }

    /// Appends a string slice to the SQL accumulated so far.
    fn push_str(&mut self, s: &str) {
        self.query.sql.push_str(s);
    }

    /// Appends a single character to the SQL accumulated so far.
    fn push(&mut self, c: char) {
        self.query.sql.push(c);
    }

    /// Marks the query under construction for sorted comparison.
    fn sorted_compare(&mut self) {
        self.query.sorted_compare = true;
    }

    /// True when no SQL text has been accumulated yet.
    fn is_empty(&self) -> bool {
        self.query.sql.is_empty()
    }

    /// Creates a Query and resets this builder to default
    fn build_and_reset(&mut self) -> Option<Query> {
        if self.is_empty() {
            None
        } else {
            Some(std::mem::take(&mut self.query))
        }
    }
}
/// Poor man's parser to find all the SQL queries in an input file
#[derive(Debug, PartialEq)]
pub struct TestQueries {
queries: Vec<String>,
queries: Vec<Query>,
}
impl TestQueries {
pub fn new<I, S>(queries: I) -> Self
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
let queries = queries
.into_iter()
.map(|s| s.as_ref().to_string())
.collect();
pub fn new(queries: Vec<Query>) -> Self {
Self { queries }
}
@ -25,10 +85,14 @@ impl TestQueries {
S: AsRef<str>,
{
let mut queries = vec![];
let mut current_line = String::new();
let mut builder = QueryBuilder::new();
lines.into_iter().for_each(|line| {
let line = line.as_ref().trim();
if line == "-- IOX_COMPARE: sorted" {
builder.sorted_compare();
}
if line.starts_with("--") {
return;
}
@ -36,29 +100,30 @@ impl TestQueries {
return;
}
// declare queries when we see a semicolon at the end of the line
if !current_line.is_empty() {
current_line.push(' ');
// replace newlines
if !builder.is_empty() {
builder.push(' ');
}
current_line.push_str(line);
builder.push_str(line);
// declare queries when we see a semicolon at the end of the line
if line.ends_with(';') {
// resets current_line to String::new()
let t = std::mem::take(&mut current_line);
queries.push(t);
if let Some(q) = builder.build_and_reset() {
queries.push(q);
}
}
});
if !current_line.is_empty() {
queries.push(current_line);
if let Some(q) = builder.build_and_reset() {
queries.push(q);
}
Self { queries }
}
// Get an iterator over the queries
pub fn iter(&self) -> impl Iterator<Item = &str> {
self.queries.iter().map(|s| s.as_str())
pub fn iter(&self) -> impl Iterator<Item = &Query> {
self.queries.iter()
}
}
@ -80,7 +145,10 @@ select * from bar;
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(
queries,
TestQueries::new(vec!["select * from foo;", "select * from bar;"])
TestQueries::new(vec![
Query::new("select * from foo;"),
Query::new("select * from bar;"),
])
)
}
@ -94,7 +162,31 @@ select * from bar
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(
queries,
TestQueries::new(vec!["select * from foo;", "select * from bar"])
TestQueries::new(vec![
Query::new("select * from foo;"),
Query::new("select * from bar")
])
)
}
#[test]
// Renamed from `test_parse_queries_mulit_line` (typo). A query may span
// several input lines: the parser joins the trimmed lines with single
// spaces and ends the query at the line with a trailing semicolon.
fn test_parse_queries_multi_line() {
let input = r#"
select
*
from
foo;
select * from bar;
"#;
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(
queries,
TestQueries::new(vec![
Query::new("select * from foo;"),
Query::new("select * from bar;"),
])
)
}
@ -105,6 +197,62 @@ select * from bar
-- another comment
"#;
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(queries, TestQueries::new(vec![] as Vec<String>))
assert_eq!(queries, TestQueries::new(vec![] as Vec<Query>))
}
#[test]
// An `-- IOX_COMPARE: sorted` annotation applies only to the query that
// immediately follows it; unannotated queries keep exact-order compare.
fn test_parse_queries_sorted_compare() {
let input = r#"
select * from foo;
-- The second query should be compared to expected after sorting
-- IOX_COMPARE: sorted
select * from bar;
-- Since this query is not annotated, it should not use exected sorted
select * from baz;
select * from baz2;
-- IOX_COMPARE: sorted
select * from waz;
-- (But the compare should work subsequently)
"#;
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(
queries,
TestQueries::new(vec![
Query::new("select * from foo;"),
Query::new("select * from bar;").with_sorted_compare(),
Query::new("select * from baz;"),
Query::new("select * from baz2;"),
Query::new("select * from waz;").with_sorted_compare(),
])
)
}
#[test]
// A trailing `-- IOX_COMPARE: sorted` with no query after it is
// discarded: the builder holding the annotation never produces a Query.
fn test_parse_queries_sorted_compare_after() {
let input = r#"
select * from foo;
-- IOX_COMPARE: sorted
"#;
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(
queries,
TestQueries::new(vec![Query::new("select * from foo;"),])
)
}
#[test]
// Only the exact annotation `-- IOX_COMPARE: sorted` is recognized;
// other IOX_COMPARE values are treated as plain comments and ignored.
fn test_parse_queries_sorted_compare_not_match_ignored() {
let input = r#"
-- IOX_COMPARE: something_else
select * from foo;
"#;
let queries = TestQueries::from_lines(input.split('\n'));
assert_eq!(
queries,
TestQueries::new(vec![Query::new("select * from foo;"),])
)
}
}

View File

@ -48,8 +48,10 @@ pub fn get_all_setups() -> &'static HashMap<String, Arc<dyn DbSetup>> {
SETUPS.get_or_init(|| {
vec![
register_setup!(TwoMeasurements),
register_setup!(TwoMeasurementsManyFields),
register_setup!(TwoMeasurementsPredicatePushDown),
register_setup!(TwoMeasurementsManyFieldsOneChunk),
register_setup!(TwoMeasurementsManyFieldsTwoChunks),
register_setup!(TwoMeasurementsManyFieldsOneRubChunk),
register_setup!(OneMeasurementFourChunksWithDuplicates),
register_setup!(OneMeasurementAllChunksDropped),

View File

@ -123,177 +123,6 @@ async fn sql_select_from_school() {
.await;
}
#[tokio::test]
async fn sql_select_from_information_schema_tables() {
// validate we have access to information schema for listing table
// names
// NOTE(review): this coverage appears duplicated by the new
// cases/in/sql_information_schema.sql added in this change -- confirm
// before relying on this test.
let expected = vec![
"+---------------+--------------------+---------------------+------------+",
"| table_catalog | table_schema | table_name | table_type |",
"+---------------+--------------------+---------------------+------------+",
"| public | information_schema | columns | VIEW |",
"| public | information_schema | tables | VIEW |",
"| public | iox | h2o | BASE TABLE |",
"| public | iox | o2 | BASE TABLE |",
"| public | system | chunk_columns | BASE TABLE |",
"| public | system | chunks | BASE TABLE |",
"| public | system | columns | BASE TABLE |",
"| public | system | operations | BASE TABLE |",
"| public | system | persistence_windows | BASE TABLE |",
"| public | system | queries | BASE TABLE |",
"+---------------+--------------------+---------------------+------------+",
];
run_sql_test_case(
TwoMeasurementsManyFields {},
"SELECT * from information_schema.tables",
&expected,
)
.await;
run_sql_test_case(TwoMeasurementsManyFields {}, "SHOW TABLES", &expected).await;
}
#[tokio::test]
async fn sql_select_from_information_schema_columns() {
// validate we have access to information schema for listing columns
// names
// NOTE(review): this coverage appears duplicated by the new
// cases/in/sql_information_schema.sql added in this change -- confirm
// before relying on this test.
let expected = vec![
"+---------------+--------------+------------+-------------+------------------+----------------+-------------+-----------------------------+--------------------------+------------------------+-------------------+-------------------------+---------------+--------------------+---------------+",
"| table_catalog | table_schema | table_name | column_name | ordinal_position | column_default | is_nullable | data_type | character_maximum_length | character_octet_length | numeric_precision | numeric_precision_radix | numeric_scale | datetime_precision | interval_type |",
"+---------------+--------------+------------+-------------+------------------+----------------+-------------+-----------------------------+--------------------------+------------------------+-------------------+-------------------------+---------------+--------------------+---------------+",
"| public | iox | h2o | city | 0 | | YES | Dictionary(Int32, Utf8) | | | | | | | |",
"| public | iox | h2o | moisture | 1 | | YES | Float64 | | | 24 | 2 | | | |",
"| public | iox | h2o | other_temp | 2 | | YES | Float64 | | | 24 | 2 | | | |",
"| public | iox | h2o | state | 3 | | YES | Dictionary(Int32, Utf8) | | | | | | | |",
"| public | iox | h2o | temp | 4 | | YES | Float64 | | | 24 | 2 | | | |",
"| public | iox | h2o | time | 5 | | NO | Timestamp(Nanosecond, None) | | | | | | | |",
"| public | iox | o2 | city | 0 | | YES | Dictionary(Int32, Utf8) | | | | | | | |",
"| public | iox | o2 | reading | 1 | | YES | Float64 | | | 24 | 2 | | | |",
"| public | iox | o2 | state | 2 | | YES | Dictionary(Int32, Utf8) | | | | | | | |",
"| public | iox | o2 | temp | 3 | | YES | Float64 | | | 24 | 2 | | | |",
"| public | iox | o2 | time | 4 | | NO | Timestamp(Nanosecond, None) | | | | | | | |",
"+---------------+--------------+------------+-------------+------------------+----------------+-------------+-----------------------------+--------------------------+------------------------+-------------------+-------------------------+---------------+--------------------+---------------+",
];
run_sql_test_case(
TwoMeasurementsManyFields {},
"SELECT * from information_schema.columns where table_name = 'h2o' OR table_name = 'o2'",
&expected,
)
.await;
}
#[tokio::test]
async fn sql_show_columns() {
// validate we have access to SHOW SCHEMA for listing columns
// names
// NOTE(review): this coverage appears duplicated by the new
// cases/in/sql_information_schema.sql added in this change -- confirm
// before relying on this test.
let expected = vec![
"+---------------+--------------+------------+-------------+-----------------------------+-------------+",
"| table_catalog | table_schema | table_name | column_name | data_type | is_nullable |",
"+---------------+--------------+------------+-------------+-----------------------------+-------------+",
"| public | iox | h2o | city | Dictionary(Int32, Utf8) | YES |",
"| public | iox | h2o | moisture | Float64 | YES |",
"| public | iox | h2o | other_temp | Float64 | YES |",
"| public | iox | h2o | state | Dictionary(Int32, Utf8) | YES |",
"| public | iox | h2o | temp | Float64 | YES |",
"| public | iox | h2o | time | Timestamp(Nanosecond, None) | NO |",
"+---------------+--------------+------------+-------------+-----------------------------+-------------+",
];
run_sql_test_case(
TwoMeasurementsManyFields {},
"SHOW COLUMNS FROM h2o",
&expected,
)
.await;
}
#[tokio::test]
async fn sql_select_from_system_chunks() {
// system tables reflect the state of chunks, so don't run them
// with different chunk configurations.
// ensures the tables / plumbing are hooked up (so no need to
// test timestamps, etc)
// NOTE(review): this coverage appears duplicated by the new
// cases/in/sql_system_tables.sql added in this change -- confirm
// before relying on this test.
let expected = vec![
"+---------------+------------+-------------------+--------------+-----------+",
"| partition_key | table_name | storage | memory_bytes | row_count |",
"+---------------+------------+-------------------+--------------+-----------+",
"| 1970-01-01T00 | h2o | OpenMutableBuffer | 1831 | 3 |",
"| 1970-01-01T00 | o2 | OpenMutableBuffer | 1827 | 2 |",
"+---------------+------------+-------------------+--------------+-----------+",
];
run_sql_test_case(
TwoMeasurementsManyFieldsOneChunk {},
"SELECT partition_key, table_name, storage, memory_bytes, row_count from system.chunks",
&expected,
)
.await;
}
#[tokio::test]
async fn sql_select_from_system_columns() {
// system tables reflect the state of chunks, so don't run them
// with different chunk configurations.
// ensures the tables / plumbing are hooked up (so no need to
// test timestamps, etc)
// NOTE(review): this coverage appears duplicated by the new
// cases/in/sql_system_tables.sql added in this change -- confirm
// before relying on this test.
let expected = vec![
"+---------------+------------+-------------+-------------+---------------+",
"| partition_key | table_name | column_name | column_type | influxdb_type |",
"+---------------+------------+-------------+-------------+---------------+",
"| 1970-01-01T00 | h2o | city | String | Tag |",
"| 1970-01-01T00 | h2o | other_temp | F64 | Field |",
"| 1970-01-01T00 | h2o | state | String | Tag |",
"| 1970-01-01T00 | h2o | temp | F64 | Field |",
"| 1970-01-01T00 | h2o | time | I64 | Timestamp |",
"| 1970-01-01T00 | o2 | city | String | Tag |",
"| 1970-01-01T00 | o2 | reading | F64 | Field |",
"| 1970-01-01T00 | o2 | state | String | Tag |",
"| 1970-01-01T00 | o2 | temp | F64 | Field |",
"| 1970-01-01T00 | o2 | time | I64 | Timestamp |",
"+---------------+------------+-------------+-------------+---------------+",
];
run_sql_test_case(
TwoMeasurementsManyFieldsOneChunk {},
"SELECT * from system.columns",
&expected,
)
.await;
}
#[tokio::test]
async fn sql_select_from_system_chunk_columns() {
// system tables reflect the state of chunks, so don't run them
// with different chunk configurations.
// NOTE(review): this coverage appears duplicated by the new
// cases/in/sql_system_tables2.sql added in this change -- confirm
// before relying on this test.
let expected = vec![
"+---------------+------------+-------------+-------------------+-----------+------------+-----------+-----------+--------------+",
"| partition_key | table_name | column_name | storage | row_count | null_count | min_value | max_value | memory_bytes |",
"+---------------+------------+-------------+-------------------+-----------+------------+-----------+-----------+--------------+",
"| 1970-01-01T00 | h2o | city | OpenMutableBuffer | 1 | 0 | Boston | Boston | 309 |",
"| 1970-01-01T00 | h2o | city | ReadBuffer | 2 | 0 | Boston | Boston | 359 |",
"| 1970-01-01T00 | h2o | other_temp | OpenMutableBuffer | 1 | 0 | 72.4 | 72.4 | 297 |",
"| 1970-01-01T00 | h2o | other_temp | ReadBuffer | 2 | 1 | 70.4 | 70.4 | 471 |",
"| 1970-01-01T00 | h2o | state | OpenMutableBuffer | 1 | 0 | CA | CA | 309 |",
"| 1970-01-01T00 | h2o | state | ReadBuffer | 2 | 0 | MA | MA | 347 |",
"| 1970-01-01T00 | h2o | temp | ReadBuffer | 2 | 1 | 70.4 | 70.4 | 471 |",
"| 1970-01-01T00 | h2o | time | OpenMutableBuffer | 1 | 0 | 350 | 350 | 297 |",
"| 1970-01-01T00 | h2o | time | ReadBuffer | 2 | 0 | 50 | 250 | 110 |",
"| 1970-01-01T00 | o2 | city | OpenMutableBuffer | 2 | 1 | Boston | Boston | 309 |",
"| 1970-01-01T00 | o2 | reading | OpenMutableBuffer | 2 | 1 | 51 | 51 | 297 |",
"| 1970-01-01T00 | o2 | state | OpenMutableBuffer | 2 | 0 | CA | MA | 313 |",
"| 1970-01-01T00 | o2 | temp | OpenMutableBuffer | 2 | 0 | 53.4 | 79 | 297 |",
"| 1970-01-01T00 | o2 | time | OpenMutableBuffer | 2 | 0 | 50 | 300 | 297 |",
"+---------------+------------+-------------+-------------------+-----------+------------+-----------+-----------+--------------+",
];
run_sql_test_case(
TwoMeasurementsManyFieldsTwoChunks {},
"SELECT partition_key, table_name, column_name, storage, row_count, null_count, min_value, max_value, memory_bytes from system.chunk_columns",
&expected,
)
.await;
}
#[tokio::test]
async fn sql_select_from_system_operations() {
test_helpers::maybe_start_logging();