Merge branch 'main' into cn/ingester2
commit 9e3d0fcefb
|
@ -1238,7 +1238,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"ahash 0.8.2",
|
||||
"arrow",
|
||||
|
@ -1283,7 +1283,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-common"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
|
@ -1295,7 +1295,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-expr"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"ahash 0.8.2",
|
||||
"arrow",
|
||||
|
@ -1307,7 +1307,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-optimizer"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"async-trait",
|
||||
|
@ -1322,7 +1322,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-physical-expr"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"ahash 0.8.2",
|
||||
"arrow",
|
||||
|
@ -1351,12 +1351,15 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-proto"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"chrono",
|
||||
"datafusion",
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"object_store",
|
||||
"parking_lot 0.12.1",
|
||||
"pbjson-build",
|
||||
"prost 0.11.3",
|
||||
"prost-build 0.11.2",
|
||||
|
@ -1365,7 +1368,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-row"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"datafusion-common",
|
||||
|
@ -1376,7 +1379,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "datafusion-sql"
|
||||
version = "14.0.0"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=fdc83e8524df30ac5d0ae097572b7c48dc686ba9#fdc83e8524df30ac5d0ae097572b7c48dc686ba9"
|
||||
source = "git+https://github.com/apache/arrow-datafusion.git?rev=799dd747152f6574638a844986b8ea8470d3f4d6#799dd747152f6574638a844986b8ea8470d3f4d6"
|
||||
dependencies = [
|
||||
"arrow",
|
||||
"datafusion-common",
|
||||
|
@ -1542,6 +1545,7 @@ dependencies = [
|
|||
"futures",
|
||||
"libc",
|
||||
"observability_deps",
|
||||
"once_cell",
|
||||
"parking_lot 0.12.1",
|
||||
"pin-project",
|
||||
"tokio",
|
||||
|
@ -2628,7 +2632,6 @@ dependencies = [
|
|||
"itertools",
|
||||
"object_store",
|
||||
"observability_deps",
|
||||
"once_cell",
|
||||
"parking_lot 0.12.1",
|
||||
"parquet_file",
|
||||
"predicate",
|
||||
|
@ -5294,9 +5297,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.104"
|
||||
version = "1.0.105"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
|
||||
checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -114,8 +114,8 @@ license = "MIT OR Apache-2.0"
|
|||
[workspace.dependencies]
|
||||
arrow = { version = "28.0.0" }
|
||||
arrow-flight = { version = "28.0.0" }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="fdc83e8524df30ac5d0ae097572b7c48dc686ba9", default-features = false }
|
||||
datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="fdc83e8524df30ac5d0ae097572b7c48dc686ba9" }
|
||||
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="799dd747152f6574638a844986b8ea8470d3f4d6", default-features = false }
|
||||
datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="799dd747152f6574638a844986b8ea8470d3f4d6" }
|
||||
hashbrown = { version = "0.13.1" }
|
||||
parquet = { version = "28.0.0" }
|
||||
|
||||
|
|
|
@ -86,7 +86,16 @@ fn create_table(results: &[RecordBatch]) -> Result<Table> {
|
|||
}
|
||||
table.set_header(header);
|
||||
|
||||
for batch in results {
|
||||
for (i, batch) in results.iter().enumerate() {
|
||||
if batch.schema() != schema {
|
||||
return Err(ArrowError::SchemaError(format!(
|
||||
"Batches have different schemas:\n\nFirst:\n{}\n\nBatch {}:\n{}",
|
||||
schema,
|
||||
i + 1,
|
||||
batch.schema()
|
||||
)));
|
||||
}
|
||||
|
||||
for row in 0..batch.num_rows() {
|
||||
let mut cells = Vec::new();
|
||||
for col in 0..batch.num_columns() {
|
||||
|
@ -112,6 +121,7 @@ mod tests {
|
|||
},
|
||||
datatypes::Int32Type,
|
||||
};
|
||||
use datafusion::common::assert_contains;
|
||||
|
||||
#[test]
|
||||
fn test_formatting() {
|
||||
|
@ -182,4 +192,16 @@ mod tests {
|
|||
expected, actual
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pretty_format_batches_checks_schemas() {
|
||||
let int64_array: ArrayRef = Arc::new([Some(2)].iter().collect::<Int64Array>());
|
||||
let uint64_array: ArrayRef = Arc::new([Some(2)].iter().collect::<UInt64Array>());
|
||||
|
||||
let batch1 = RecordBatch::try_from_iter(vec![("col", int64_array)]).unwrap();
|
||||
let batch2 = RecordBatch::try_from_iter(vec![("col", uint64_array)]).unwrap();
|
||||
|
||||
let err = pretty_format_batches(&[batch1, batch2]).unwrap_err();
|
||||
assert_contains!(err.to_string(), "Batches have different schemas:");
|
||||
}
|
||||
}
|
||||
@ -2,8 +2,10 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use arrow::{
|
||||
array::{ArrayRef, StringArray},
|
||||
array::{new_null_array, ArrayRef, StringArray},
|
||||
compute::kernels::sort::{lexsort, SortColumn, SortOptions},
|
||||
datatypes::Schema,
|
||||
error::ArrowError,
|
||||
record_batch::RecordBatch,
|
||||
};
|
||||
|
||||
|
@ -131,3 +133,26 @@ where
|
|||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Equalize batch schemas by creating NULL columns.
|
||||
pub fn equalize_batch_schemas(batches: Vec<RecordBatch>) -> Result<Vec<RecordBatch>, ArrowError> {
|
||||
let common_schema = Arc::new(Schema::try_merge(
|
||||
batches.iter().map(|batch| batch.schema().as_ref().clone()),
|
||||
)?);
|
||||
|
||||
Ok(batches
|
||||
.into_iter()
|
||||
.map(|batch| {
|
||||
let batch_schema = batch.schema();
|
||||
let columns = common_schema
|
||||
.fields()
|
||||
.iter()
|
||||
.map(|field| match batch_schema.index_of(field.name()) {
|
||||
Ok(idx) => Arc::clone(batch.column(idx)),
|
||||
Err(_) => new_null_array(field.data_type(), batch.num_rows()),
|
||||
})
|
||||
.collect();
|
||||
RecordBatch::try_new(Arc::clone(&common_schema), columns).unwrap()
|
||||
})
|
||||
.collect())
|
||||
}
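A minimal usage sketch of the helper above (not part of the diff; the column names and values are made up, and the function is assumed to be in scope). Each input batch keeps its own columns and gains NULL-filled columns for any field it is missing from the merged schema:

use std::sync::Arc;
use arrow::array::{ArrayRef, Int64Array, StringArray};
use arrow::record_batch::RecordBatch;

fn equalize_example() {
    let ints: ArrayRef = Arc::new(Int64Array::from(vec![Some(1), Some(2)]));
    let strs: ArrayRef = Arc::new(StringArray::from(vec![Some("a"), Some("b")]));

    // Two batches with disjoint columns.
    let batch_a = RecordBatch::try_from_iter(vec![("col_int", ints)]).unwrap();
    let batch_b = RecordBatch::try_from_iter(vec![("col_str", strs)]).unwrap();

    // After equalizing, both batches share the merged schema; the column a batch
    // did not originally have is a NULL array of the matching length.
    let equalized = equalize_batch_schemas(vec![batch_a, batch_b]).unwrap();
    assert_eq!(equalized[0].num_columns(), 2);
    assert_eq!(equalized[1].num_columns(), 2);
}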
@ -208,6 +208,18 @@ macro_rules! gen_compactor_config {
|
|||
action
|
||||
)]
|
||||
pub hot_compaction_hours_threshold_2: u64,
|
||||
|
||||
/// Max number of partitions that can be compacted in parallel at once
|
||||
/// We use memory budget to estimate how many partitions can be compacted in parallel at once.
|
||||
/// However, we do not want that number to be too large, as that would cause high usage of CPU cores
|
||||
/// and may also make the memory estimation less accurate. This number caps that.
|
||||
#[clap(
|
||||
long = "compaction-max-parallel-partitions",
|
||||
env = "INFLUXDB_IOX_COMPACTION_MAX_PARALLEL_PARTITIONS",
|
||||
default_value = "20",
|
||||
action
|
||||
)]
|
||||
pub max_parallel_partitions: u64,
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -239,6 +251,7 @@ impl CompactorOnceConfig {
|
|||
minutes_without_new_writes_to_be_cold: self.minutes_without_new_writes_to_be_cold,
|
||||
hot_compaction_hours_threshold_1: self.hot_compaction_hours_threshold_1,
|
||||
hot_compaction_hours_threshold_2: self.hot_compaction_hours_threshold_2,
|
||||
max_parallel_partitions: self.max_parallel_partitions,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -114,6 +114,7 @@ mod tests {
|
|||
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
|
||||
const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_compact_remaining_level_0_files_many_files() {
|
||||
|
@ -710,6 +711,7 @@ mod tests {
|
|||
minutes_without_new_writes_to_be_cold: 10,
|
||||
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
|
||||
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
|
||||
max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -562,6 +562,7 @@ pub mod tests {
|
|||
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
|
||||
const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
|
||||
|
||||
impl PartitionCompactionCandidateWithInfo {
|
||||
pub(crate) async fn from_test_partition(test_partition: &TestPartition) -> Self {
|
||||
|
@ -697,6 +698,7 @@ pub mod tests {
|
|||
minutes_without_new_writes_to_be_cold: 10,
|
||||
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
|
||||
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
|
||||
max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -173,6 +173,12 @@ pub struct CompactorConfig {
|
|||
/// When querying for partitions with data for hot compaction, how many hours to look
|
||||
/// back for a second pass if we found nothing in the first pass.
|
||||
pub hot_compaction_hours_threshold_2: u64,
|
||||
|
||||
/// Max number of partitions that can be compacted in parallel at once
|
||||
/// We use memory budget to estimate how many partitions can be compacted in parallel at once.
|
||||
/// However, we do not want that number to be too large, as that would cause high usage of CPU cores
|
||||
/// and may also make the memory estimation less accurate. This number caps that.
|
||||
pub max_parallel_partitions: u64,
|
||||
}
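A small illustrative sketch (not from the diff; the function and parameter names are hypothetical) of how this field interacts with the memory budget: a compaction round takes however many partitions fit in the budget, hard-capped by `max_parallel_partitions`.

fn effective_parallelism(partitions_fitting_in_budget: u64, max_parallel_partitions: u64) -> u64 {
    // A round is flushed either when the budget is nearly exhausted or when this
    // cap is reached, whichever comes first.
    partitions_fitting_in_budget.min(max_parallel_partitions)
}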
|
||||
|
||||
/// How long to pause before checking for more work again if there was
|
||||
@ -224,6 +224,7 @@ mod tests {
|
|||
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
|
||||
const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
|
||||
|
||||
struct TestSetup {
|
||||
catalog: Arc<TestCatalog>,
|
||||
|
@ -544,6 +545,7 @@ mod tests {
|
|||
minutes_without_new_writes_to_be_cold: 10,
|
||||
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
|
||||
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
|
||||
max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
|
||||
};
|
||||
let compactor = Arc::new(Compactor::new(
|
||||
vec![shard1.shard.id, shard2.shard.id],
|
||||
@ -238,18 +238,22 @@ async fn compact_candidates_with_memory_budget<C, Fut>(
|
|||
}
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
// 4. Almost hitting max budget (only 10% left)
|
||||
// OR no more candidates
|
||||
// OR already considered all remaining candidates.
|
||||
// 4. Compact the candidates in parallel_compacting_candidates if any of these conditions is met:
|
||||
// . candidates in parallel_compacting_candidates consume almost all the budget
|
||||
// . no more candidates
|
||||
// . already considered all remaining candidates.
|
||||
// . hit the max number of partitions to compact in parallel
|
||||
if (!parallel_compacting_candidates.is_empty())
|
||||
&& ((remaining_budget_bytes <= (compactor.config.memory_budget_bytes / 10) as u64)
|
||||
|| (candidates.is_empty())
|
||||
|| (count == num_remaining_candidates))
|
||||
|| (count == num_remaining_candidates)
|
||||
|| (count as u64 == compactor.config.max_parallel_partitions))
|
||||
{
|
||||
debug!(
|
||||
num_parallel_compacting_candidates = parallel_compacting_candidates.len(),
|
||||
total_needed_memory_budget_bytes =
|
||||
compactor.config.memory_budget_bytes - remaining_budget_bytes,
|
||||
config_max_parallel_partitions = compactor.config.max_parallel_partitions,
|
||||
compaction_type,
|
||||
"parallel compacting candidate"
|
||||
);
|
||||
|
@ -452,7 +456,9 @@ pub mod tests {
|
|||
use arrow_util::assert_batches_sorted_eq;
|
||||
use backoff::BackoffConfig;
|
||||
use data_types::{ColumnType, CompactionLevel, ParquetFileId};
|
||||
use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestShard, TestTable};
|
||||
use iox_tests::util::{
|
||||
TestCatalog, TestParquetFileBuilder, TestPartition, TestShard, TestTable,
|
||||
};
|
||||
use iox_time::{SystemProvider, TimeProvider};
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
|
@ -462,6 +468,7 @@ pub mod tests {
|
|||
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1: u64 = 4;
|
||||
const DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2: u64 = 24;
|
||||
const DEFAULT_MAX_PARALLEL_PARTITIONS: u64 = 20;
|
||||
|
||||
// In tests that are verifying successful compaction not affected by the memory budget, this
|
||||
// converts a `parquet_file_filtering::FilteredFiles` that has a `filter_result` of
|
||||
|
@ -500,7 +507,7 @@ pub mod tests {
|
|||
compactor,
|
||||
mock_compactor,
|
||||
..
|
||||
} = test_setup(14350).await;
|
||||
} = test_setup(14350, 20).await;
|
||||
|
||||
let sorted_candidates = VecDeque::new();
|
||||
|
||||
|
@ -563,7 +570,7 @@ pub mod tests {
|
|||
}
|
||||
}
|
||||
|
||||
fn make_compactor_config(budget: u64) -> CompactorConfig {
|
||||
fn make_compactor_config(budget: u64, max_parallel_jobs: u64) -> CompactorConfig {
|
||||
// All numbers in here are chosen carefully for many tests.
|
||||
// Changing them will break the tests
|
||||
CompactorConfig {
|
||||
|
@ -580,6 +587,7 @@ pub mod tests {
|
|||
minutes_without_new_writes_to_be_cold: 10,
|
||||
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
|
||||
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
|
||||
max_parallel_partitions: max_parallel_jobs,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -591,10 +599,10 @@ pub mod tests {
|
|||
}
|
||||
|
||||
pub(crate) async fn test_setup_with_default_budget() -> TestSetup {
|
||||
test_setup(14350).await
|
||||
test_setup(14350, 20).await
|
||||
}
|
||||
|
||||
pub(crate) async fn test_setup(budget: u64) -> TestSetup {
|
||||
pub(crate) async fn test_setup(budget: u64, max_parallel_jobs: u64) -> TestSetup {
|
||||
let catalog = TestCatalog::new();
|
||||
let namespace = catalog
|
||||
.create_namespace_1hr_retention("namespace_hot_partitions_to_compact")
|
||||
|
@ -617,7 +625,7 @@ pub mod tests {
|
|||
|
||||
// Create a compactor
|
||||
let time_provider = Arc::new(SystemProvider::new());
|
||||
let config = make_compactor_config(budget);
|
||||
let config = make_compactor_config(budget, max_parallel_jobs);
|
||||
let compactor = Arc::new(Compactor::new(
|
||||
vec![shard.shard.id],
|
||||
Arc::clone(&catalog.catalog),
|
||||
|
@ -640,161 +648,11 @@ pub mod tests {
|
|||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hot_compact_candidates_with_memory_budget() {
|
||||
async fn test_hot_compact_candidates_with_limit_memory_budget() {
|
||||
test_helpers::maybe_start_logging();
|
||||
|
||||
let TestSetup {
|
||||
compactor,
|
||||
mock_compactor,
|
||||
shard,
|
||||
table,
|
||||
..
|
||||
} = test_setup(14350).await;
|
||||
|
||||
// Some times in the past to set to created_at of the files
|
||||
let hot_time_one_hour_ago = compactor.time_provider.hours_ago(1);
|
||||
|
||||
// P1:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition1 = table.with_shard(&shard).create_partition("one").await;
|
||||
|
||||
// 2 files with IDs 1 and 2
|
||||
let pf1_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition1.create_parquet_file_catalog_record(pf1_1).await;
|
||||
|
||||
let pf1_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf1_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition1.create_parquet_file_catalog_record(pf1_2).await;
|
||||
|
||||
// P2:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition2 = table.with_shard(&shard).create_partition("two").await;
|
||||
|
||||
// 2 files with IDs 3 and 4
|
||||
let pf2_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition2.create_parquet_file_catalog_record(pf2_1).await;
|
||||
|
||||
let pf2_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf2_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition2.create_parquet_file_catalog_record(pf2_2).await;
|
||||
|
||||
// P3: bytes >= 90% of full budget = 90% * 14,350 = 12,915
|
||||
// L0 40 rows. bytes: 2,250
|
||||
// Five L1s. bytes: 2,250 each
|
||||
// total = 2,250 * 6 = 13,500
|
||||
let partition3 = table.with_shard(&shard).create_partition("three").await;
|
||||
|
||||
// 6 files with IDs 5, 6, 7, 8, 9, 10
|
||||
let pf3_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(6)
|
||||
.with_row_count(40)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition3.create_parquet_file_catalog_record(pf3_1).await;
|
||||
|
||||
// Five overlapped L1 files
|
||||
for i in 1..6 {
|
||||
let pf3_i = TestParquetFileBuilder::default()
|
||||
.with_min_time(i) // overlapped with pf3_1
|
||||
.with_max_time(i)
|
||||
.with_row_count(24)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition3.create_parquet_file_catalog_record(pf3_i).await;
|
||||
}
|
||||
|
||||
// P4: Over the full budget
|
||||
// L0 40 rows. bytes: 2,250
|
||||
// Six L1s. bytes: 2,250 each
|
||||
// total = 2,250 * 7 = 15,750 > 14350
|
||||
let partition4 = table.with_shard(&shard).create_partition("four").await;
|
||||
|
||||
// 7 files with IDs 11, 12, 13, 14, 15, 16, 17
|
||||
let pf4_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(7)
|
||||
.with_row_count(70)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition4.create_parquet_file_catalog_record(pf4_1).await;
|
||||
|
||||
// Six overlapped L1 files
|
||||
for i in 1..7 {
|
||||
let pf4_i = TestParquetFileBuilder::default()
|
||||
.with_min_time(i) // overlapped with pf4_1
|
||||
.with_max_time(i)
|
||||
.with_row_count(40)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition4.create_parquet_file_catalog_record(pf4_i).await;
|
||||
}
|
||||
|
||||
// P5:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition5 = table.with_shard(&shard).create_partition("five").await;
|
||||
// 2 files with IDs 18, 19
|
||||
let pf5_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition5.create_parquet_file_catalog_record(pf5_1).await;
|
||||
|
||||
let pf5_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf5_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition5.create_parquet_file_catalog_record(pf5_2).await;
|
||||
|
||||
// P6:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition6 = table.with_shard(&shard).create_partition("six").await;
|
||||
// 2 files with IDs 20, 21
|
||||
let pf6_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition6.create_parquet_file_catalog_record(pf6_1).await;
|
||||
|
||||
let pf6_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf6_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition6.create_parquet_file_catalog_record(pf6_2).await;
|
||||
// Test setup with a limited memory budget (14,350) and a very large, effectively unlimited for this test, max_parallel_jobs (200)
|
||||
let (compactor, mock_compactor, partitions) = make_6_partitions(14350, 200).await;
|
||||
|
||||
// partition candidates: partitions with L0 and overlapped L1
|
||||
let mut candidates = hot::hot_partitions_to_compact(Arc::clone(&compactor))
|
||||
|
@ -838,19 +696,19 @@ pub mod tests {
|
|||
assert_eq!(group1.len(), 3);
|
||||
|
||||
let g1_candidate1 = &group1[0];
|
||||
assert_eq!(g1_candidate1.partition.id(), partition1.partition.id);
|
||||
assert_eq!(g1_candidate1.partition.id(), partitions[0].partition.id);
|
||||
let g1_candidate1_pf_ids: Vec<_> =
|
||||
g1_candidate1.files.iter().map(|pf| pf.id().get()).collect();
|
||||
assert_eq!(g1_candidate1_pf_ids, vec![2, 1]);
|
||||
|
||||
let g1_candidate2 = &group1[1];
|
||||
assert_eq!(g1_candidate2.partition.id(), partition2.partition.id);
|
||||
assert_eq!(g1_candidate2.partition.id(), partitions[1].partition.id);
|
||||
let g1_candidate2_pf_ids: Vec<_> =
|
||||
g1_candidate2.files.iter().map(|pf| pf.id().get()).collect();
|
||||
assert_eq!(g1_candidate2_pf_ids, vec![4, 3]);
|
||||
|
||||
let g1_candidate3 = &group1[2];
|
||||
assert_eq!(g1_candidate3.partition.id(), partition5.partition.id);
|
||||
assert_eq!(g1_candidate3.partition.id(), partitions[4].partition.id);
|
||||
let g1_candidate3_pf_ids: Vec<_> =
|
||||
g1_candidate3.files.iter().map(|pf| pf.id().get()).collect();
|
||||
assert_eq!(g1_candidate3_pf_ids, vec![19, 18]);
|
||||
|
@ -860,7 +718,7 @@ pub mod tests {
|
|||
assert_eq!(group2.len(), 1);
|
||||
|
||||
let g2_candidate1 = &group2[0];
|
||||
assert_eq!(g2_candidate1.partition.id(), partition6.partition.id);
|
||||
assert_eq!(g2_candidate1.partition.id(), partitions[5].partition.id);
|
||||
let g2_candidate1_pf_ids: Vec<_> =
|
||||
g2_candidate1.files.iter().map(|pf| pf.id().get()).collect();
|
||||
assert_eq!(g2_candidate1_pf_ids, vec![21, 20]);
|
||||
|
@ -870,21 +728,99 @@ pub mod tests {
|
|||
assert_eq!(group3.len(), 1);
|
||||
|
||||
let g3_candidate1 = &group3[0];
|
||||
assert_eq!(g3_candidate1.partition.id(), partition3.partition.id);
|
||||
assert_eq!(g3_candidate1.partition.id(), partitions[2].partition.id);
|
||||
let g3_candidate1_pf_ids: Vec<_> =
|
||||
g3_candidate1.files.iter().map(|pf| pf.id().get()).collect();
|
||||
// all IDs of level-1 firts then level-0
|
||||
// all IDs of level-1 first then level-0
|
||||
assert_eq!(g3_candidate1_pf_ids, vec![6, 7, 8, 9, 10, 5]);
|
||||
|
||||
{
|
||||
let mut repos = compactor.catalog.repositories().await;
|
||||
let skipped_compactions = repos.partitions().list_skipped_compactions().await.unwrap();
|
||||
assert_eq!(skipped_compactions.len(), 1);
|
||||
assert_eq!(skipped_compactions[0].partition_id, partition4.partition.id);
|
||||
assert_eq!(
|
||||
skipped_compactions[0].partition_id,
|
||||
partitions[3].partition.id
|
||||
);
|
||||
assert_eq!(skipped_compactions[0].reason, "over memory budget");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hot_compact_candidates_with_limit_parallel_jobs() {
|
||||
test_helpers::maybe_start_logging();
|
||||
|
||||
// Test setup with a plentiful memory budget of 1 GB (effectively unlimited) but limited to 2 parallel jobs
|
||||
let (compactor, mock_compactor, partitions) =
|
||||
make_6_partitions(1024 * 1024 * 1024, 2).await;
|
||||
|
||||
// partition candidates: partitions with L0 and overlapped L1
|
||||
let mut candidates = hot::hot_partitions_to_compact(Arc::clone(&compactor))
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(candidates.len(), 6);
|
||||
candidates.sort_by_key(|c| c.candidate.partition_id);
|
||||
{
|
||||
let mut repos = compactor.catalog.repositories().await;
|
||||
let skipped_compactions = repos.partitions().list_skipped_compactions().await.unwrap();
|
||||
assert!(
|
||||
skipped_compactions.is_empty(),
|
||||
"Expected no skipped compactions, got: {skipped_compactions:?}"
|
||||
);
|
||||
}
|
||||
|
||||
// There are 3 rounds of parallel compaction:
|
||||
//
|
||||
// * Round 1: 2 candidates [P1, P2]
|
||||
// * Round 2: 2 candidates [P3, P4]
|
||||
// * Round 3: 2 candidates [P5, P6]
|
||||
|
||||
compact_candidates_with_memory_budget(
|
||||
Arc::clone(&compactor),
|
||||
"hot",
|
||||
CompactionLevel::Initial,
|
||||
mock_compactor.compaction_function(),
|
||||
true,
|
||||
candidates.into(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let compaction_groups = mock_compactor.results();
|
||||
|
||||
// 3 rounds of parallel compaction
|
||||
assert_eq!(compaction_groups.len(), 3);
|
||||
|
||||
// Round 1
|
||||
let group1 = &compaction_groups[0];
|
||||
assert_eq!(group1.len(), 2);
|
||||
|
||||
let g1_candidate1 = &group1[0];
|
||||
assert_eq!(g1_candidate1.partition.id(), partitions[0].partition.id);
|
||||
|
||||
let g1_candidate2 = &group1[1];
|
||||
assert_eq!(g1_candidate2.partition.id(), partitions[1].partition.id);
|
||||
|
||||
// Round 2
|
||||
let group2 = &compaction_groups[1];
|
||||
assert_eq!(group2.len(), 2);
|
||||
|
||||
let g2_candidate1 = &group2[0];
|
||||
assert_eq!(g2_candidate1.partition.id(), partitions[2].partition.id);
|
||||
|
||||
let g2_candidate2 = &group2[1];
|
||||
assert_eq!(g2_candidate2.partition.id(), partitions[3].partition.id);
|
||||
|
||||
// Round 3
|
||||
let group3 = &compaction_groups[2];
|
||||
assert_eq!(group3.len(), 2);
|
||||
|
||||
let g3_candidate1 = &group3[0];
|
||||
assert_eq!(g3_candidate1.partition.id(), partitions[4].partition.id);
|
||||
|
||||
let g3_candidate2 = &group3[1];
|
||||
assert_eq!(g3_candidate2.partition.id(), partitions[5].partition.id);
|
||||
}
|
||||
|
||||
// A quite sophisticated integration test of compacting one hot partition
|
||||
// Besides the LP data, the min/max sequence numbers and min/max times of every value are important
|
||||
// for covering the needed combinations in this test function
|
||||
|
@ -962,6 +898,7 @@ pub mod tests {
|
|||
minutes_without_new_writes_to_be_cold: 10,
|
||||
hot_compaction_hours_threshold_1: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_1,
|
||||
hot_compaction_hours_threshold_2: DEFAULT_HOT_COMPACTION_HOURS_THRESHOLD_2,
|
||||
max_parallel_partitions: DEFAULT_MAX_PARALLEL_PARTITIONS,
|
||||
};
|
||||
|
||||
let metrics = Arc::new(metric::Registry::new());
|
||||
|
@ -1153,4 +1090,172 @@ pub mod tests {
|
|||
&batches
|
||||
);
|
||||
}
|
||||
|
||||
async fn make_6_partitions(
|
||||
budget: u64,
|
||||
max_parallel_jobs: u64,
|
||||
) -> (Arc<Compactor>, MockCompactor, Vec<Arc<TestPartition>>) {
|
||||
let TestSetup {
|
||||
compactor,
|
||||
mock_compactor,
|
||||
shard,
|
||||
table,
|
||||
..
|
||||
} = test_setup(budget, max_parallel_jobs).await;
|
||||
|
||||
// Some times in the past to set to created_at of the files
|
||||
let hot_time_one_hour_ago = compactor.time_provider.hours_ago(1);
|
||||
|
||||
let mut partitions = Vec::with_capacity(6);
|
||||
|
||||
// P1:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition1 = table.with_shard(&shard).create_partition("one").await;
|
||||
|
||||
// 2 files with IDs 1 and 2
|
||||
let pf1_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition1.create_parquet_file_catalog_record(pf1_1).await;
|
||||
|
||||
let pf1_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf1_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition1.create_parquet_file_catalog_record(pf1_2).await;
|
||||
partitions.push(partition1);
|
||||
|
||||
// P2:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition2 = table.with_shard(&shard).create_partition("two").await;
|
||||
|
||||
// 2 files with IDs 3 and 4
|
||||
let pf2_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition2.create_parquet_file_catalog_record(pf2_1).await;
|
||||
|
||||
let pf2_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf2_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition2.create_parquet_file_catalog_record(pf2_2).await;
|
||||
partitions.push(partition2);
|
||||
|
||||
// P3: bytes >= 90% of full budget = 90% * 14,350 = 12,915
|
||||
// L0 40 rows. bytes: 2,250
|
||||
// Five L1s. bytes: 2,250 each
|
||||
// total = 2,250 * 6 = 13,500
|
||||
let partition3 = table.with_shard(&shard).create_partition("three").await;
|
||||
|
||||
// 6 files with IDs 5, 6, 7, 8, 9, 10
|
||||
let pf3_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(6)
|
||||
.with_row_count(40)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition3.create_parquet_file_catalog_record(pf3_1).await;
|
||||
|
||||
// Five overlapped L1 files
|
||||
for i in 1..6 {
|
||||
let pf3_i = TestParquetFileBuilder::default()
|
||||
.with_min_time(i) // overlapped with pf3_1
|
||||
.with_max_time(i)
|
||||
.with_row_count(24)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition3.create_parquet_file_catalog_record(pf3_i).await;
|
||||
}
|
||||
partitions.push(partition3);
|
||||
|
||||
// P4: Over the full budget
|
||||
// L0 40 rows. bytes: 2,250
|
||||
// Six L1s. bytes: 2,250 each
|
||||
// total = 2,250 * 7 = 15,750 > 14350
|
||||
let partition4 = table.with_shard(&shard).create_partition("four").await;
|
||||
|
||||
// 7 files with IDs 11, 12, 13, 14, 15, 16, 17
|
||||
let pf4_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(7)
|
||||
.with_row_count(70)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition4.create_parquet_file_catalog_record(pf4_1).await;
|
||||
|
||||
// Six overlapped L1 files
|
||||
for i in 1..7 {
|
||||
let pf4_i = TestParquetFileBuilder::default()
|
||||
.with_min_time(i) // overlapped with pf4_1
|
||||
.with_max_time(i)
|
||||
.with_row_count(40)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition4.create_parquet_file_catalog_record(pf4_i).await;
|
||||
}
|
||||
partitions.push(partition4);
|
||||
|
||||
// P5:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition5 = table.with_shard(&shard).create_partition("five").await;
|
||||
// 2 files with IDs 18, 19
|
||||
let pf5_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition5.create_parquet_file_catalog_record(pf5_1).await;
|
||||
|
||||
let pf5_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf5_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition5.create_parquet_file_catalog_record(pf5_2).await;
|
||||
partitions.push(partition5);
|
||||
|
||||
// P6:
|
||||
// L0 2 rows. bytes: 2,250
|
||||
// L1 2 rows. bytes: 2,250
|
||||
// total = 2,250 + 2,250 = 4,500
|
||||
let partition6 = table.with_shard(&shard).create_partition("six").await;
|
||||
// 2 files with IDs 20, 21
|
||||
let pf6_1 = TestParquetFileBuilder::default()
|
||||
.with_min_time(1)
|
||||
.with_max_time(5)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::Initial)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition6.create_parquet_file_catalog_record(pf6_1).await;
|
||||
|
||||
let pf6_2 = TestParquetFileBuilder::default()
|
||||
.with_min_time(4) // overlapped with pf6_1
|
||||
.with_max_time(6)
|
||||
.with_row_count(2)
|
||||
.with_compaction_level(CompactionLevel::FileNonOverlapped)
|
||||
.with_creation_time(hot_time_one_hour_ago);
|
||||
partition6.create_parquet_file_catalog_record(pf6_2).await;
|
||||
partitions.push(partition6);
|
||||
|
||||
(compactor, mock_compactor, partitions)
|
||||
}
|
||||
}
|
||||
@ -62,11 +62,17 @@ async fn watch_task<S>(
|
|||
let msg = match task_result {
|
||||
Err(join_err) => {
|
||||
debug!(e=%join_err, %description, "Error joining");
|
||||
Some(format!("Join error for '{description}': {join_err}"))
|
||||
Some(DataFusionError::Context(
|
||||
format!("Join error for '{description}'"),
|
||||
Box::new(DataFusionError::External(Box::new(join_err))),
|
||||
))
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(%e, %description, "Error in task itself");
|
||||
Some(format!("Execution error for '{description}': {e}"))
|
||||
Some(DataFusionError::Context(
|
||||
format!("Execution error for '{description}'"),
|
||||
Box::new(DataFusionError::ArrowError(e)),
|
||||
))
|
||||
}
|
||||
Ok(Ok(())) => {
|
||||
// successful
|
||||
|
@ -76,12 +82,13 @@ async fn watch_task<S>(
|
|||
|
||||
// If there is a message to send down the channel, try and do so
|
||||
if let Some(e) = msg {
|
||||
let e = Arc::new(e);
|
||||
for tx in tx {
|
||||
// try and tell the receiver something went
|
||||
// wrong. Note we ignore errors sending this message
|
||||
// as that means the receiver has already been
|
||||
// shutdown and no one cares anymore lol
|
||||
let err: ArrowError = DataFusionError::Execution(e.clone()).into();
|
||||
let err = ArrowError::ExternalError(Box::new(Arc::clone(&e)));
|
||||
if tx.send(Err(err)).await.is_err() {
|
||||
debug!(%description, "receiver hung up");
|
||||
}
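A short sketch (not part of the change; the description string is made up and an io::Error stands in for the real join/execution error) of the error shape that watch_task now forwards. The point of the change is that the original error is preserved as a source instead of being flattened into a plain string:

use datafusion::error::DataFusionError;

fn wrapped_error_example() {
    let inner = std::io::Error::new(std::io::ErrorKind::Other, "worker panicked");
    let err = DataFusionError::Context(
        "Join error for 'plan execution'".to_string(),
        Box::new(DataFusionError::External(Box::new(inner))),
    );
    // The context message stays visible while the inner error remains reachable as the source.
    assert!(err.to_string().contains("Join error for 'plan execution'"));
}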
|
||||
@ -35,9 +35,11 @@ In the case the Compactor cannot compact the smallest set of files of a partitio
|
|||
Even though we try to avoid OOMs by estimating the needed memory, they still happen in extreme cases. Currently, OOMs in the compactor will not resolve themselves without human action because the compactor will likely choose the same heavy partitions to compact after it is restarted. The easiest way to stop OOMs is to increase memory; doubling it or more is recommended. The memory can be brought back down after all the high-volume partitions are compacted.
|
||||
|
||||
If increasing memory substantially does not help, consider changing one or a combination of the config parameters below (a hedged sketch of halved settings follows the list). Be aware that by doing this you may be telling the compactor it has a much smaller memory budget to use, and it will push one or more partitions into the `skipped_compactions` list. Moreover, reducing the memory budget also reduces the concurrency capacity of the compactor. It is recommended that you do not try this unless you know the compactor's workload very well.
|
||||
- `INFLUXDB_IOX_COMPACTION_MAX_PARALLEL_PARTITIONS`: Reduce this value by half or more. This puts a hard cap on the maximum number of partitions compacted in parallel. It significantly reduces both CPU and memory usage and should be your first choice to adjust.
|
||||
- `INFLUXDB_IOX_COMPACTION_MIN_ROWS_PER_RECORD_BATCH_TO_PLAN`: Double or triple this value. This tells the compactor that compaction plans need more memory, which reduces the number of files that can be compacted at once.
|
||||
- `INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES`: Reduce this value by half or more. This tells the compactor its total budget is smaller, so it will reduce the number of partitions it can compact concurrently or the number of files compacted for a partition.
|
||||
- `INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES`: Reduce this value by half or more. This puts a hard cap on the maximum number of files of a partition that can be compacted, even if the memory budget estimate would allow for more.
|
||||
- `INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES`: reduce this value in half or more. This tells the compact its total budget is less so it will reduce the number of partitions it can compact concurrently or reduce the number of files to be compacted for a partition.
|
||||
- `INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES_FIRST_IN_PARTITION`: This should be the last choice to adjust. Reduce this value by half or more, but this only helps if the number of L1 files that overlap with the first L0 file is large. This is similar to `INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES` but applies to the set of files the compactor must compact first for a partition. If the number of files in this set is larger than this setting, the compactor will skip compacting the partition and record it in the `skipped_compactions` catalog table.
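A hedged sketch (values are made up and this is not a recommendation) of what halving the two first-choice knobs looks like in terms of the CompactorConfig fields these environment variables populate; `previous` stands for whatever configuration is currently running:

fn halve_first_choice_knobs(previous: CompactorConfig) -> CompactorConfig {
    CompactorConfig {
        // INFLUXDB_IOX_COMPACTION_MAX_PARALLEL_PARTITIONS, e.g. 20 -> 10
        max_parallel_partitions: previous.max_parallel_partitions / 2,
        // INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES, halved
        memory_budget_bytes: previous.memory_budget_bytes / 2,
        // everything else unchanged
        ..previous
    }
}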
|
||||
|
||||
# Compactor Config Parameters
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ license.workspace = true
|
|||
[dependencies]
|
||||
futures = "0.3"
|
||||
observability_deps = { path = "../observability_deps" }
|
||||
once_cell = { version = "1.16.0", features = ["parking_lot"] }
|
||||
parking_lot = "0.12"
|
||||
pin-project = "1.0"
|
||||
tokio = { version = "1.22" }
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
clippy::dbg_macro
|
||||
)]
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
use parking_lot::Mutex;
|
||||
use pin_project::{pin_project, pinned_drop};
|
||||
use std::{pin::Pin, sync::Arc};
|
||||
|
@ -100,6 +101,11 @@ impl<T> PinnedDrop for Job<T> {
|
|||
#[derive(Clone)]
|
||||
pub struct DedicatedExecutor {
|
||||
state: Arc<Mutex<State>>,
|
||||
|
||||
/// Used for testing.
|
||||
///
|
||||
/// This will ignore explicit shutdown requests.
|
||||
testing: bool,
|
||||
}
|
||||
|
||||
/// Runs futures (and any `tasks` that are `tokio::task::spawned` by
|
||||
|
@ -150,6 +156,10 @@ impl std::fmt::Debug for DedicatedExecutor {
|
|||
}
|
||||
}
|
||||
|
||||
/// [`DedicatedExecutor`] for testing purposes.
|
||||
static TESTING_EXECUTOR: Lazy<DedicatedExecutor> =
|
||||
Lazy::new(|| DedicatedExecutor::new_inner("testing", 1, true));
|
||||
|
||||
impl DedicatedExecutor {
|
||||
/// Creates a new `DedicatedExecutor` with a dedicated tokio
|
||||
/// executor that is separate from the threadpool created via
|
||||
|
@ -168,6 +178,10 @@ impl DedicatedExecutor {
|
|||
/// happens when a runtime is dropped from within an asynchronous
|
||||
/// context.', .../tokio-1.4.0/src/runtime/blocking/shutdown.rs:51:21
|
||||
pub fn new(thread_name: &str, num_threads: usize) -> Self {
|
||||
Self::new_inner(thread_name, num_threads, false)
|
||||
}
|
||||
|
||||
fn new_inner(thread_name: &str, num_threads: usize, testing: bool) -> Self {
|
||||
let thread_name = thread_name.to_string();
|
||||
|
||||
let (tx_tasks, rx_tasks) = std::sync::mpsc::channel::<Task>();
|
||||
|
@ -215,9 +229,17 @@ impl DedicatedExecutor {
|
|||
|
||||
Self {
|
||||
state: Arc::new(Mutex::new(state)),
|
||||
testing,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new executor for testing purposes.
|
||||
///
|
||||
/// Internal state may be shared with other tests.
|
||||
pub fn new_testing() -> Self {
|
||||
TESTING_EXECUTOR.clone()
|
||||
}
|
||||
|
||||
/// Runs the specified Future (and any tasks it spawns) on the
|
||||
/// `DedicatedExecutor`.
|
||||
///
|
||||
|
@ -268,6 +290,10 @@ impl DedicatedExecutor {
|
|||
|
||||
/// signals shutdown of this executor and any Clones
|
||||
pub fn shutdown(&self) {
|
||||
if self.testing {
|
||||
return;
|
||||
}
|
||||
|
||||
// hang up the channel which will cause the dedicated thread
|
||||
// to quit
|
||||
let mut state = self.state.lock();
|
||||
|
@ -287,6 +313,10 @@ impl DedicatedExecutor {
|
|||
/// [`join`](Self::join) manually during [`Drop`] or panics because this might lead to another panic, see
|
||||
/// <https://github.com/rust-lang/futures-rs/issues/2575>.
|
||||
pub async fn join(&self) {
|
||||
if self.testing {
|
||||
return;
|
||||
}
|
||||
|
||||
self.shutdown();
|
||||
|
||||
// get handle mutex is held
|
||||
|
|
|
@ -46,6 +46,7 @@ pub mod simple_from_clause;
|
|||
pub mod statement;
|
||||
pub mod string;
|
||||
pub mod visit;
|
||||
pub mod visit_mut;
|
||||
|
||||
/// An error returned when parsing an InfluxQL query using
|
||||
/// [`parse_statements`] fails.
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"DELETE WHERE 'foo bar' =~ /foo/\")"
|
||||
---
|
||||
- "pre_visit_statement: Delete(Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })))"
|
||||
- "pre_visit_delete_statement: Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }))"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }"
|
||||
- "pre_visit_conditional_expression: Expr(Literal(String(\"foo bar\")))"
|
||||
- "pre_visit_expr: Literal(String(\"foo bar\"))"
|
||||
- "post_visit_expr: Literal(String(\"foo bar\"))"
|
||||
- "post_visit_conditional_expression: Expr(Literal(String(\"foo bar\")))"
|
||||
- "pre_visit_conditional_expression: Expr(Literal(Regex(Regex(\"foo\"))))"
|
||||
- "pre_visit_expr: Literal(Regex(Regex(\"foo\")))"
|
||||
- "post_visit_expr: Literal(Regex(Regex(\"foo\")))"
|
||||
- "post_visit_conditional_expression: Expr(Literal(Regex(Regex(\"foo\"))))"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })"
|
||||
- "post_visit_delete_statement: Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }))"
|
||||
- "post_visit_statement: Delete(Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })))"
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"DELETE FROM cpu\")"
|
||||
---
|
||||
- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None })"
|
||||
- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None }"
|
||||
- "pre_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }"
|
||||
- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None }"
|
||||
- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None })"
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"DELETE FROM /^cpu/\")"
|
||||
---
|
||||
- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None })"
|
||||
- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None }"
|
||||
- "pre_visit_delete_from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }"
|
||||
- "pre_visit_measurement_name: Regex(Regex(\"^cpu\"))"
|
||||
- "post_visit_measurement_name: Regex(Regex(\"^cpu\"))"
|
||||
- "post_visit_delete_from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }"
|
||||
- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None }"
|
||||
- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None })"
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"DELETE FROM a WHERE b = \\\"c\\\"\")"
|
||||
---
|
||||
- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) })"
|
||||
- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) }"
|
||||
- "pre_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"a\"))] }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"a\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"a\"))"
|
||||
- "post_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"a\"))] }"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"b\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"b\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"b\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"b\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"c\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"c\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"c\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"c\"), data_type: None })"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })"
|
||||
- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) }"
|
||||
- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) })"
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"DROP MEASUREMENT cpu\")"
|
||||
---
|
||||
- "pre_visit_statement: DropMeasurement(DropMeasurementStatement { name: Identifier(\"cpu\") })"
|
||||
- "pre_visit_drop_measurement_statement: DropMeasurementStatement { name: Identifier(\"cpu\") }"
|
||||
- "post_visit_drop_measurement_statement: DropMeasurementStatement { name: Identifier(\"cpu\") }"
|
||||
- "post_visit_statement: DropMeasurement(DropMeasurementStatement { name: Identifier(\"cpu\") })"
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"EXPLAIN SELECT * FROM cpu\")"
|
||||
---
|
||||
- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
|
||||
- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }"
|
||||
- "pre_visit_expr: Wildcard(None)"
|
||||
- "post_visit_expr: Wildcard(None)"
|
||||
- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
|
||||
- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(r#\"SELECT DISTINCT value FROM temp\"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }"
|
||||
- "pre_visit_expr: Distinct(Identifier(\"value\"))"
|
||||
- "post_visit_expr: Distinct(Identifier(\"value\"))"
|
||||
- "post_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(r#\"SELECT COUNT(value) FROM temp\"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }"
|
||||
- "pre_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }"
|
||||
- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(r#\"SELECT COUNT(DISTINCT value) FROM temp\"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }"
|
||||
- "pre_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }"
|
||||
- "pre_visit_expr: Distinct(Identifier(\"value\"))"
|
||||
- "post_visit_expr: Distinct(Identifier(\"value\"))"
|
||||
- "post_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }"
|
||||
- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }"
|
||||
- "pre_visit_expr: Wildcard(None)"
|
||||
- "post_visit_expr: Wildcard(None)"
|
||||
- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Regex(Regex(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"memory\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"memory\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
|
||||
- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }"
|
||||
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"node1\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"node1\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"region\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"region\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"region\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"region\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(Literal(Regex(Regex(\"west\"))))"
|
||||
- "pre_visit_expr: Literal(Regex(Regex(\"west\")))"
|
||||
- "post_visit_expr: Literal(Regex(Regex(\"west\")))"
|
||||
- "post_visit_conditional_expression: Expr(Literal(Regex(Regex(\"west\"))))"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"value\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"value\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(Literal(Unsigned(5)))"
|
||||
- "pre_visit_expr: Literal(Unsigned(5))"
|
||||
- "post_visit_expr: Literal(Unsigned(5))"
|
||||
- "post_visit_conditional_expression: Expr(Literal(Unsigned(5)))"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })"
|
||||
- "pre_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
|
||||
- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }"
|
||||
- "pre_visit_expr: Literal(Duration(Duration(300000000000)))"
|
||||
- "post_visit_expr: Literal(Duration(Duration(300000000000)))"
|
||||
- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }"
|
||||
- "pre_visit_select_dimension: Tag(Identifier(\"host\"))"
|
||||
- "post_visit_select_dimension: Tag(Identifier(\"host\"))"
|
||||
- "post_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
|
||||
- "pre_visit_fill_clause: Previous"
|
||||
- "post_visit_fill_clause: Previous"
|
||||
- "pre_visit_order_by_clause: Descending"
|
||||
- "post_visit_order_by_clause: Descending"
|
||||
- "pre_visit_limit_clause: LimitClause(1)"
|
||||
- "post_visit_limit_clause: LimitClause(1)"
|
||||
- "pre_visit_offset_clause: OffsetClause(2)"
|
||||
- "post_visit_offset_clause: OffsetClause(2)"
|
||||
- "pre_visit_slimit_clause: SLimitClause(3)"
|
||||
- "post_visit_slimit_clause: SLimitClause(3)"
|
||||
- "pre_visit_soffset_clause: SOffsetClause(4)"
|
||||
- "post_visit_soffset_clause: SOffsetClause(4)"
|
||||
- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")"
|
||||
- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(r#\"SELECT value FROM temp\"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
|
||||
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
|
||||
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
|
||||
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
@ -0,0 +1,9 @@
---
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"SHOW DATABASES\")"
---
- "pre_visit_statement: ShowDatabases(ShowDatabasesStatement)"
- "pre_visit_show_databases_statement: ShowDatabasesStatement"
- "post_visit_show_databases_statement: ShowDatabasesStatement"
- "post_visit_statement: ShowDatabases(ShowDatabasesStatement)"
@ -0,0 +1,11 @@
---
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"SHOW FIELD KEYS ON telegraf\")"
---
- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None })"
- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None }"
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
- "post_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None }"
- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None })"
@ -0,0 +1,15 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW FIELD KEYS FROM cpu\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None })"
|
||||
- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None }"
|
||||
- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }"
|
||||
- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None })"
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW FIELD KEYS ON telegraf FROM /cpu/\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None })"
|
||||
- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None }"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Regex(Regex(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }"
|
||||
- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }"
|
||||
- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None })"
@ -0,0 +1,9 @@
---
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"SHOW FIELD KEYS\")"
---
- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None })"
- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None }"
- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None }"
- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None })"
@ -0,0 +1,11 @@
---
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"SHOW MEASUREMENTS ON db.rp\")"
---
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None })"
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None }"
- "pre_visit_extended_on_clause: DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))"
- "post_visit_extended_on_clause: DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))"
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None }"
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None })"
@ -0,0 +1,15 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW MEASUREMENTS WITH MEASUREMENT = \\\"cpu\\\"\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None })"
|
||||
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None }"
|
||||
- "pre_visit_with_measurement_clause: Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "post_visit_with_measurement_clause: Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
|
||||
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None })"
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW MEASUREMENTS WHERE host = 'west'\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None })"
|
||||
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None }"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(Literal(String(\"west\")))"
|
||||
- "pre_visit_expr: Literal(String(\"west\"))"
|
||||
- "post_visit_expr: Literal(String(\"west\"))"
|
||||
- "post_visit_conditional_expression: Expr(Literal(String(\"west\")))"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })"
|
||||
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None })"
@ -0,0 +1,11 @@
---
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"SHOW MEASUREMENTS LIMIT 5\")"
---
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None })"
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None }"
- "pre_visit_limit_clause: LimitClause(5)"
- "post_visit_limit_clause: LimitClause(5)"
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None }"
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None })"
@ -0,0 +1,11 @@
---
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"SHOW MEASUREMENTS OFFSET 10\")"
---
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) })"
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) }"
- "pre_visit_offset_clause: OffsetClause(10)"
- "post_visit_offset_clause: OffsetClause(10)"
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) }"
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) })"
@ -0,0 +1,33 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW MEASUREMENTS ON * WITH MEASUREMENT =~ /foo/ WHERE host = 'west' LIMIT 10 OFFSET 20\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) })"
|
||||
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) }"
|
||||
- "pre_visit_extended_on_clause: AllDatabases"
|
||||
- "post_visit_extended_on_clause: AllDatabases"
|
||||
- "pre_visit_with_measurement_clause: Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) }"
|
||||
- "pre_visit_measurement_name: Regex(Regex(\"foo\"))"
|
||||
- "post_visit_measurement_name: Regex(Regex(\"foo\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) }"
|
||||
- "post_visit_with_measurement_clause: Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(Literal(String(\"west\")))"
|
||||
- "pre_visit_expr: Literal(String(\"west\"))"
|
||||
- "post_visit_expr: Literal(String(\"west\"))"
|
||||
- "post_visit_conditional_expression: Expr(Literal(String(\"west\")))"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })"
|
||||
- "pre_visit_limit_clause: LimitClause(10)"
|
||||
- "post_visit_limit_clause: LimitClause(10)"
|
||||
- "pre_visit_offset_clause: OffsetClause(20)"
|
||||
- "post_visit_offset_clause: OffsetClause(20)"
|
||||
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) }"
|
||||
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) })"
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW MEASUREMENTS\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None })"
|
||||
- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None }"
|
||||
- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None })"
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW RETENTION POLICIES ON telegraf\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) })"
|
||||
- "pre_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) }"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "post_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) }"
|
||||
- "post_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) })"
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW RETENTION POLICIES\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: None })"
|
||||
- "pre_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: None }"
|
||||
- "post_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: None }"
|
||||
- "post_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: None })"
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW TAG KEYS ON telegraf FROM cpu WHERE host = \\\"west\\\" LIMIT 5 OFFSET 10\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })"
|
||||
- "pre_visit_show_tag_keys_statement: ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })"
|
||||
- "pre_visit_limit_clause: LimitClause(5)"
|
||||
- "post_visit_limit_clause: LimitClause(5)"
|
||||
- "pre_visit_offset_clause: OffsetClause(10)"
|
||||
- "post_visit_offset_clause: OffsetClause(10)"
|
||||
- "post_visit_show_tag_keys_statement: ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }"
|
||||
- "post_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })"
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW TAG KEYS\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None })"
|
||||
- "pre_visit_show_tag_keys_statement: ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None }"
|
||||
- "post_visit_show_tag_keys_statement: ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None })"
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY =~ /host|region/\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None })"
|
||||
- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None }"
|
||||
- "pre_visit_with_key_clause: EqRegex(Regex(\"host|region\"))"
|
||||
- "post_visit_with_key_clause: EqRegex(Regex(\"host|region\"))"
|
||||
- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None })"
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY IN (host, region)\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None })"
|
||||
- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None }"
|
||||
- "pre_visit_with_key_clause: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] })"
|
||||
- "post_visit_with_key_clause: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] })"
|
||||
- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None })"
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW TAG VALUES ON telegraf FROM cpu WITH KEY = host WHERE host = \\\"west\\\" LIMIT 5 OFFSET 10\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })"
|
||||
- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))"
|
||||
- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }"
|
||||
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
|
||||
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
|
||||
- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }"
|
||||
- "pre_visit_with_key_clause: Eq(Identifier(\"host\"))"
|
||||
- "post_visit_with_key_clause: Eq(Identifier(\"host\"))"
|
||||
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })"
|
||||
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"
|
||||
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })"
|
||||
- "pre_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }"
|
||||
- "post_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }"
|
||||
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })"
|
||||
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }"
|
||||
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })"
|
||||
- "pre_visit_limit_clause: LimitClause(5)"
|
||||
- "post_visit_limit_clause: LimitClause(5)"
|
||||
- "pre_visit_offset_clause: OffsetClause(10)"
|
||||
- "post_visit_offset_clause: OffsetClause(10)"
|
||||
- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }"
|
||||
- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })"
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
source: influxdb_influxql_parser/src/visit_mut.rs
|
||||
expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY = host\")"
|
||||
---
|
||||
- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None })"
|
||||
- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None }"
|
||||
- "pre_visit_with_key_clause: Eq(Identifier(\"host\"))"
|
||||
- "post_visit_with_key_clause: Eq(Identifier(\"host\"))"
|
||||
- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None }"
|
||||
- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None })"
|
||||
|
|
@ -21,6 +21,7 @@
|
|||
//! let vis = MyVisitor;
|
||||
//! statement.accept(vis);
|
||||
//! ```
|
||||
use self::Recursion::*;
|
||||
use crate::common::{
|
||||
LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName,
|
||||
WhereClause,
|
||||
|
@ -45,7 +46,6 @@ use crate::show_tag_keys::ShowTagKeysStatement;
|
|||
use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause};
|
||||
use crate::simple_from_clause::{DeleteFromClause, ShowFromClause};
|
||||
use crate::statement::Statement;
|
||||
use crate::visit::Recursion::*;
|
||||
|
||||
/// The result type for a [`Visitor`].
|
||||
pub type VisitorResult<T, E = &'static str> = Result<T, E>;
|
||||
|
@ -1168,6 +1168,8 @@ impl Visitable for OnClause {
|
|||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::Recursion::Continue;
|
||||
use super::{Recursion, Visitable, Visitor, VisitorResult};
|
||||
use crate::common::{
|
||||
LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName,
|
||||
WhereClause,
|
||||
|
@ -1191,8 +1193,6 @@ mod test {
|
|||
use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause};
|
||||
use crate::simple_from_clause::{DeleteFromClause, ShowFromClause};
|
||||
use crate::statement::{statement, Statement};
|
||||
use crate::visit::Recursion::Continue;
|
||||
use crate::visit::{Recursion, Visitable, Visitor, VisitorResult};
|
||||
use std::fmt::Debug;
|
||||
|
||||
struct TestVisitor(Vec<String>);
|
||||
|
|
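Context for the snapshot files above: each trace line is emitted by a test visitor that records one entry per pre-/post-visit callback as it walks the parsed statement. Below is a minimal, self-contained sketch of that recording pattern; the real crate defines a `Visitor` trait with a pre/post method pair per AST node type, so the toy `LimitClause` node and the `main` harness here are purely illustrative.

```rust
// Toy stand-in for one AST node type; its `Debug` output matches the snapshot style.
#[derive(Debug)]
struct LimitClause(u64);

// Records one line per pre/post callback, like the `TestVisitor(Vec<String>)` in the diff.
#[derive(Debug, Default)]
struct TestVisitor(Vec<String>);

impl TestVisitor {
    fn push_pre(&mut self, name: &str, node: &impl std::fmt::Debug) {
        self.0.push(format!("pre_visit_{}: {:?}", name, node));
    }

    fn push_post(&mut self, name: &str, node: &impl std::fmt::Debug) {
        self.0.push(format!("post_visit_{}: {:?}", name, node));
    }
}

fn main() {
    let limit = LimitClause(10);
    let mut vis = TestVisitor::default();

    // A node's `accept` calls the pre hook, recurses into children, then the post hook.
    vis.push_pre("limit_clause", &limit);
    vis.push_post("limit_clause", &limit);

    // Prints lines like `pre_visit_limit_clause: LimitClause(10)`, matching the snapshots.
    for line in &vis.0 {
        println!("{line}");
    }
}
```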
File diff suppressed because it is too large
|
@ -439,6 +439,7 @@ impl Config {
|
|||
minutes_without_new_writes_to_be_cold: 10,
|
||||
hot_compaction_hours_threshold_1: 4,
|
||||
hot_compaction_hours_threshold_2: 24,
|
||||
max_parallel_partitions: 20,
|
||||
};
|
||||
|
||||
let querier_config = QuerierConfig {
|
||||
|
|
|
@ -201,7 +201,7 @@ mod tests {
|
|||
assert_eq!(expected_pk, pk);
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let CompactedStream { stream, .. } =
|
||||
compact_persisting_batch(&exc, Some(SortKey::empty()), "test_table".into(), batch)
|
||||
.await
|
||||
|
@ -238,7 +238,7 @@ mod tests {
|
|||
assert_eq!(expected_pk, pk);
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let CompactedStream {
|
||||
stream,
|
||||
data_sort_key,
|
||||
|
@ -286,7 +286,7 @@ mod tests {
|
|||
let expected_pk = vec!["tag1", "tag3", "time"];
|
||||
assert_eq!(expected_pk, pk);
|
||||
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
|
||||
// NO SORT KEY from the catalog here, first persisting batch
|
||||
let CompactedStream {
|
||||
|
@ -340,7 +340,7 @@ mod tests {
|
|||
let expected_pk = vec!["tag1", "tag3", "time"];
|
||||
assert_eq!(expected_pk, pk);
|
||||
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
|
||||
// SPECIFY A SORT KEY HERE to simulate a sort key being stored in the catalog
|
||||
// this is NOT what the computed sort key would be based on this data's cardinality
|
||||
|
@ -399,7 +399,7 @@ mod tests {
|
|||
let expected_pk = vec!["tag1", "tag3", "time"];
|
||||
assert_eq!(expected_pk, pk);
|
||||
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
|
||||
// SPECIFY A SORT KEY HERE to simulate a sort key being stored in the catalog
|
||||
// this is NOT what the computed sort key would be based on this data's cardinality
|
||||
|
@ -462,7 +462,7 @@ mod tests {
|
|||
let expected_pk = vec!["tag1", "tag3", "time"];
|
||||
assert_eq!(expected_pk, pk);
|
||||
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
|
||||
// SPECIFY A SORT KEY HERE to simulate a sort key being stored in the catalog
|
||||
// this is NOT what the computed sort key would be based on this data's cardinality
|
||||
|
@ -529,7 +529,7 @@ mod tests {
|
|||
assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let stream = compact(&exc, "test_table".into(), Arc::new(batch), sort_key)
|
||||
.await
|
||||
.unwrap();
|
||||
|
@ -570,7 +570,7 @@ mod tests {
|
|||
assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let stream = compact(&exc, "test_table".into(), Arc::new(batch), sort_key)
|
||||
.await
|
||||
.unwrap();
|
||||
|
@ -616,7 +616,7 @@ mod tests {
|
|||
assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let stream = compact(&exc, "test_table".into(), Arc::new(batch), sort_key)
|
||||
.await
|
||||
.unwrap();
|
||||
|
@ -662,7 +662,7 @@ mod tests {
|
|||
assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let stream = compact(&exc, "test_table".into(), Arc::new(batch), sort_key)
|
||||
.await
|
||||
.unwrap();
|
||||
|
@ -712,7 +712,7 @@ mod tests {
|
|||
assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
|
||||
|
||||
// compact
|
||||
let exc = Executor::new(1);
|
||||
let exc = Executor::new_testing();
|
||||
let stream = compact(&exc, "test_table".into(), Arc::new(batch), sort_key)
|
||||
.await
|
||||
.unwrap();
|
||||
|
|
|
@ -800,7 +800,7 @@ mod tests {
|
|||
(shard1.id, shard1.shard_index),
|
||||
(shard2.id, shard2.shard_index),
|
||||
],
|
||||
Arc::new(Executor::new(1)),
|
||||
Arc::new(Executor::new_testing()),
|
||||
BackoffConfig::default(),
|
||||
Arc::clone(&metrics),
|
||||
)
|
||||
|
|
|
@ -521,7 +521,7 @@ mod tests {
|
|||
Arc::clone(&catalog),
|
||||
object_store,
|
||||
reading,
|
||||
Arc::new(Executor::new(1)),
|
||||
Arc::new(Executor::new_testing()),
|
||||
Arc::clone(&metrics),
|
||||
skip_to_oldest_available,
|
||||
1,
|
||||
|
|
|
@ -2,9 +2,12 @@
|
|||
|
||||
use std::{pin::Pin, sync::Arc};
|
||||
|
||||
use arrow::{array::new_null_array, error::ArrowError, record_batch::RecordBatch};
|
||||
use arrow_util::optimize::{
|
||||
prepare_batch_for_flight, prepare_schema_for_flight, split_batch_for_grpc_response,
|
||||
use arrow::{error::ArrowError, record_batch::RecordBatch};
|
||||
use arrow_util::{
|
||||
optimize::{
|
||||
prepare_batch_for_flight, prepare_schema_for_flight, split_batch_for_grpc_response,
|
||||
},
|
||||
test_util::equalize_batch_schemas,
|
||||
};
|
||||
use data_types::{NamespaceId, PartitionId, SequenceNumber, TableId};
|
||||
use datafusion::physical_plan::SendableRecordBatchStream;
|
||||
|
@ -12,7 +15,7 @@ use datafusion_util::MemoryStream;
|
|||
use futures::{Stream, StreamExt, TryStreamExt};
|
||||
use generated_types::ingester::IngesterQueryRequest;
|
||||
use observability_deps::tracing::*;
|
||||
use schema::{merge::SchemaMerger, Projection};
|
||||
use schema::Projection;
|
||||
use snafu::{ensure, Snafu};
|
||||
use trace::span::{Span, SpanRecorder};
|
||||
|
||||
|
@ -187,7 +190,6 @@ impl IngesterQueryResponse {
|
|||
/// do not line up with the snapshot-scoped record batches.
|
||||
pub async fn into_record_batches(self) -> Vec<RecordBatch> {
|
||||
let mut snapshot_schema = None;
|
||||
let mut schema_merger = SchemaMerger::new();
|
||||
let mut batches = vec![];
|
||||
|
||||
let mut stream = self.flatten();
|
||||
|
@ -201,33 +203,13 @@ impl IngesterQueryResponse {
|
|||
}
|
||||
FlatIngesterQueryResponse::StartSnapshot { schema } => {
|
||||
snapshot_schema = Some(Arc::clone(&schema));
|
||||
|
||||
schema_merger = schema_merger
|
||||
.merge(&schema::Schema::try_from(schema).unwrap())
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert!(!batches.is_empty());
|
||||
|
||||
// equalize schemas
|
||||
let common_schema = schema_merger.build().as_arrow();
|
||||
batches
|
||||
.into_iter()
|
||||
.map(|batch| {
|
||||
let batch_schema = batch.schema();
|
||||
let columns = common_schema
|
||||
.fields()
|
||||
.iter()
|
||||
.map(|field| match batch_schema.index_of(field.name()) {
|
||||
Ok(idx) => Arc::clone(batch.column(idx)),
|
||||
Err(_) => new_null_array(field.data_type(), batch.num_rows()),
|
||||
})
|
||||
.collect();
|
||||
RecordBatch::try_new(Arc::clone(&common_schema), columns).unwrap()
|
||||
})
|
||||
.collect()
|
||||
equalize_batch_schemas(batches).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
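The hunk above swaps the hand-rolled schema equalisation in `into_record_batches` for `arrow_util::test_util::equalize_batch_schemas`. Below is a sketch of what that helper conceptually does, reconstructed from the removed lines; the real helper's exact signature and merge semantics may differ, and `equalize_batch_schemas_sketch` is a hypothetical name used only here.

```rust
use std::sync::Arc;

use arrow::{
    array::new_null_array, datatypes::Schema, error::ArrowError, record_batch::RecordBatch,
};

/// Pad every batch out to the union of all input schemas, filling columns a
/// batch lacks with nulls, so the batches can be concatenated or compared.
fn equalize_batch_schemas_sketch(
    batches: Vec<RecordBatch>,
) -> Result<Vec<RecordBatch>, ArrowError> {
    // Union of all schemas seen across the input batches.
    let common_schema = Arc::new(Schema::try_merge(
        batches.iter().map(|b| b.schema().as_ref().clone()),
    )?);

    batches
        .into_iter()
        .map(|batch| {
            let batch_schema = batch.schema();
            let columns = common_schema
                .fields()
                .iter()
                .map(|field| match batch_schema.index_of(field.name()) {
                    // Column exists in this batch: reuse it.
                    Ok(idx) => Arc::clone(batch.column(idx)),
                    // Column missing: fill with nulls of the right type and length.
                    Err(_) => new_null_array(field.data_type(), batch.num_rows()),
                })
                .collect();
            RecordBatch::try_new(Arc::clone(&common_schema), columns)
        })
        .collect()
}
```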
|
@ -455,7 +455,7 @@ pub(crate) async fn make_ingester_data(
|
|||
let metrics: Arc<metric::Registry> = Default::default();
|
||||
let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics)));
|
||||
let object_store = Arc::new(InMemory::new());
|
||||
let exec = Arc::new(iox_query::exec::Executor::new(1));
|
||||
let exec = Arc::new(iox_query::exec::Executor::new_testing());
|
||||
let lifecycle = LifecycleManager::new(
|
||||
LifecycleConfig::new(
|
||||
200_000_000,
|
||||
|
|
|
@ -105,7 +105,7 @@ impl TestContext {
|
|||
Arc::clone(&catalog),
|
||||
Arc::clone(&object_store),
|
||||
write_buffer_read,
|
||||
Arc::new(Executor::new(1)),
|
||||
Arc::new(Executor::new_testing()),
|
||||
Arc::clone(&metrics),
|
||||
true,
|
||||
1,
|
||||
|
@ -160,7 +160,7 @@ impl TestContext {
|
|||
Arc::clone(&self.catalog),
|
||||
Arc::clone(&self.object_store),
|
||||
write_buffer_read,
|
||||
Arc::new(Executor::new(1)),
|
||||
Arc::new(Executor::new_testing()),
|
||||
Arc::clone(&self.metrics),
|
||||
true,
|
||||
1,
|
||||
|
|
|
@ -55,7 +55,7 @@ impl std::fmt::Display for NamespaceName {
|
|||
#[derive(Debug)]
|
||||
pub(crate) struct NamespaceData {
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: DeferredLoad<NamespaceName>,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
|
||||
/// A set of tables this [`NamespaceData`] instance has processed
|
||||
/// [`DmlOperation`]'s for.
|
||||
|
@ -94,7 +94,7 @@ impl NamespaceData {
|
|||
|
||||
Self {
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
namespace_name: Arc::new(namespace_name),
|
||||
tables: Default::default(),
|
||||
table_name_resolver,
|
||||
table_count,
|
||||
|
@ -143,6 +143,7 @@ impl DmlSink for NamespaceData {
|
|||
table_id,
|
||||
self.table_name_resolver.for_table(table_id),
|
||||
self.namespace_id,
|
||||
Arc::clone(&self.namespace_name),
|
||||
Arc::clone(&self.partition_provider),
|
||||
))
|
||||
});
|
||||
|
@ -235,6 +236,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("banana-split"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
|
|
@ -8,7 +8,7 @@ use observability_deps::tracing::*;
|
|||
use schema::sort::SortKey;
|
||||
|
||||
use self::buffer::{traits::Queryable, BufferState, DataBuffer, Persisting};
|
||||
use super::table::TableName;
|
||||
use super::{namespace::NamespaceName, table::TableName};
|
||||
use crate::{deferred_load::DeferredLoad, query_adaptor::QueryAdaptor};
|
||||
|
||||
mod buffer;
|
||||
|
@ -54,8 +54,13 @@ pub(crate) struct PartitionData {
|
|||
/// fetch details.
|
||||
sort_key: SortKeyState,
|
||||
|
||||
/// The namespace & table IDs for this partition.
|
||||
/// The namespace this partition is part of.
|
||||
namespace_id: NamespaceId,
|
||||
/// The name of the namespace this partition is part of, potentially
|
||||
/// unresolved / deferred.
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
|
||||
/// The catalog ID for the table this partition is part of.
|
||||
table_id: TableId,
|
||||
/// The name of the table this partition is part of, potentially unresolved
|
||||
/// / deferred.
|
||||
|
@ -75,6 +80,7 @@ impl PartitionData {
|
|||
id: PartitionId,
|
||||
partition_key: PartitionKey,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
table_id: TableId,
|
||||
table_name: Arc<DeferredLoad<TableName>>,
|
||||
sort_key: SortKeyState,
|
||||
|
@ -84,6 +90,7 @@ impl PartitionData {
|
|||
partition_key,
|
||||
sort_key,
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
table_id,
|
||||
table_name,
|
||||
buffer: DataBuffer::default(),
|
||||
|
@ -247,6 +254,14 @@ impl PartitionData {
|
|||
self.namespace_id
|
||||
}
|
||||
|
||||
/// Return the [`NamespaceName`] this partition is a part of, potentially
|
||||
/// deferred / not yet resolved.
|
||||
///
|
||||
/// NOTE: this MAY involve querying the catalog with unbounded retries.
|
||||
pub(crate) fn namespace_name(&self) -> &Arc<DeferredLoad<NamespaceName>> {
|
||||
&self.namespace_name
|
||||
}
|
||||
|
||||
/// Return the [`SortKey`] for this partition.
|
||||
///
|
||||
/// NOTE: this MAY involve querying the catalog with unbounded retries.
|
||||
|
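The `DeferredLoad::new(Duration::from_secs(1), async { ... })` / `.get().await` pairing used throughout these hunks defers resolving a namespace or table name until a caller first awaits it. Below is a rough stand-in using `tokio::sync::OnceCell`, purely to illustrate the lazy-resolution idea; the real `DeferredLoad` takes its loader future at construction time and has richer timing and retry behaviour than this.

```rust
use std::time::Duration;

use tokio::sync::OnceCell;

#[tokio::main]
async fn main() {
    // Stand-in for `DeferredLoad<NamespaceName>`: nothing is resolved until the
    // value is first awaited, mirroring `partition.namespace_name().get().await`.
    let namespace_name: OnceCell<String> = OnceCell::new();

    let name = namespace_name
        .get_or_init(|| async {
            // The real loader queries the catalog (with retries) after at most
            // the configured delay; this one just pretends.
            tokio::time::sleep(Duration::from_millis(10)).await;
            "namespace-bananas".to_string()
        })
        .await;

    assert_eq!(name, "namespace-bananas");
}
```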
@ -289,6 +304,7 @@ mod tests {
|
|||
lazy_static! {
|
||||
static ref PARTITION_KEY: PartitionKey = PartitionKey::from("platanos");
|
||||
static ref TABLE_NAME: TableName = TableName::from("bananas");
|
||||
static ref NAMESPACE_NAME: NamespaceName = NamespaceName::from("namespace-bananas");
|
||||
}
|
||||
|
||||
// Write some data and read it back from the buffer.
|
||||
|
@ -301,6 +317,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -378,6 +397,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -508,6 +530,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -616,6 +641,9 @@ mod tests {
|
|||
PartitionId::new(1),
|
||||
"bananas".into(),
|
||||
NamespaceId::new(42),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(1),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from("platanos")
|
||||
|
@ -672,6 +700,9 @@ mod tests {
|
|||
PartitionId::new(1),
|
||||
"bananas".into(),
|
||||
NamespaceId::new(42),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(1),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from("platanos")
|
||||
|
@ -693,6 +724,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -738,6 +772,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -761,6 +798,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -778,6 +818,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
@ -802,6 +845,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.clone(),
|
||||
NamespaceId::new(3),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NAMESPACE_NAME.clone()
|
||||
})),
|
||||
TableId::new(4),
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TABLE_NAME.clone()
|
||||
|
|
|
@ -10,6 +10,7 @@ use parking_lot::Mutex;
|
|||
use super::r#trait::PartitionProvider;
|
||||
use crate::{
|
||||
buffer_tree::{
|
||||
namespace::NamespaceName,
|
||||
partition::{resolver::SortKeyResolver, PartitionData, SortKeyState},
|
||||
table::TableName,
|
||||
},
|
||||
|
@ -160,6 +161,7 @@ where
|
|||
&self,
|
||||
partition_key: PartitionKey,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
table_id: TableId,
|
||||
table_name: Arc<DeferredLoad<TableName>>,
|
||||
) -> PartitionData {
|
||||
|
@ -187,6 +189,7 @@ where
|
|||
partition_id,
|
||||
key,
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
table_id,
|
||||
table_name,
|
||||
SortKeyState::Deferred(Arc::new(sort_key_resolver)),
|
||||
|
@ -197,7 +200,13 @@ where
|
|||
|
||||
// Otherwise delegate to the catalog / inner impl.
|
||||
self.inner
|
||||
.get_partition(partition_key, namespace_id, table_id, table_name)
|
||||
.get_partition(
|
||||
partition_key,
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
table_id,
|
||||
table_name,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
@ -214,6 +223,7 @@ mod tests {
|
|||
const PARTITION_KEY: &str = "bananas";
|
||||
const PARTITION_ID: PartitionId = PartitionId::new(42);
|
||||
const NAMESPACE_ID: NamespaceId = NamespaceId::new(2);
|
||||
const NAMESPACE_NAME: &str = "ns-bananas";
|
||||
const TABLE_ID: TableId = TableId::new(3);
|
||||
const TABLE_NAME: &str = "platanos";
|
||||
|
||||
|
@ -239,6 +249,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.into(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -252,6 +265,9 @@ mod tests {
|
|||
.get_partition(
|
||||
PARTITION_KEY.into(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -262,6 +278,7 @@ mod tests {
|
|||
assert_eq!(got.partition_id(), PARTITION_ID);
|
||||
assert_eq!(got.table_id(), TABLE_ID);
|
||||
assert_eq!(&**got.table_name().get().await, TABLE_NAME);
|
||||
assert_eq!(&**got.namespace_name().get().await, NAMESPACE_NAME);
|
||||
assert!(cache.inner.is_empty());
|
||||
}
|
||||
|
||||
|
@ -286,6 +303,9 @@ mod tests {
|
|||
.get_partition(
|
||||
callers_partition_key.clone(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -296,6 +316,7 @@ mod tests {
|
|||
assert_eq!(got.partition_id(), PARTITION_ID);
|
||||
assert_eq!(got.table_id(), TABLE_ID);
|
||||
assert_eq!(&**got.table_name().get().await, TABLE_NAME);
|
||||
assert_eq!(&**got.namespace_name().get().await, NAMESPACE_NAME);
|
||||
assert_eq!(*got.partition_key(), PartitionKey::from(PARTITION_KEY));
|
||||
|
||||
// The cache should have been cleaned up as it was consumed.
|
||||
|
@ -318,6 +339,9 @@ mod tests {
|
|||
other_key_id,
|
||||
other_key.clone(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -339,6 +363,9 @@ mod tests {
|
|||
.get_partition(
|
||||
other_key.clone(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -358,6 +385,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.into(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
other_table,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -379,6 +409,9 @@ mod tests {
|
|||
.get_partition(
|
||||
PARTITION_KEY.into(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
other_table,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
|
|
@ -12,6 +12,7 @@ use observability_deps::tracing::debug;
|
|||
use super::r#trait::PartitionProvider;
|
||||
use crate::{
|
||||
buffer_tree::{
|
||||
namespace::NamespaceName,
|
||||
partition::{PartitionData, SortKeyState},
|
||||
table::TableName,
|
||||
},
|
||||
|
@ -58,6 +59,7 @@ impl PartitionProvider for CatalogPartitionResolver {
|
|||
&self,
|
||||
partition_key: PartitionKey,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
table_id: TableId,
|
||||
table_name: Arc<DeferredLoad<TableName>>,
|
||||
) -> PartitionData {
|
||||
|
@ -81,6 +83,7 @@ impl PartitionProvider for CatalogPartitionResolver {
|
|||
// definitely has no other refs.
|
||||
partition_key,
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
table_id,
|
||||
table_name,
|
||||
SortKeyState::Provided(p.sort_key()),
|
||||
|
@ -99,6 +102,7 @@ mod tests {
|
|||
use crate::TRANSITION_SHARD_ID;
|
||||
|
||||
const TABLE_NAME: &str = "bananas";
|
||||
const NAMESPACE_NAME: &str = "ns-bananas";
|
||||
const PARTITION_KEY: &str = "platanos";
|
||||
|
||||
#[tokio::test]
|
||||
|
@ -140,6 +144,9 @@ mod tests {
|
|||
.get_partition(
|
||||
callers_partition_key.clone(),
|
||||
namespace_id,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
table_id,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
|
|
@ -8,7 +8,7 @@ use parking_lot::Mutex;
|
|||
|
||||
use super::r#trait::PartitionProvider;
|
||||
use crate::{
|
||||
buffer_tree::{partition::PartitionData, table::TableName},
|
||||
buffer_tree::{namespace::NamespaceName, partition::PartitionData, table::TableName},
|
||||
deferred_load::DeferredLoad,
|
||||
};
|
||||
|
||||
|
@ -51,6 +51,7 @@ impl PartitionProvider for MockPartitionProvider {
|
|||
&self,
|
||||
partition_key: PartitionKey,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
table_id: TableId,
|
||||
table_name: Arc<DeferredLoad<TableName>>,
|
||||
) -> PartitionData {
|
||||
|
@ -63,6 +64,7 @@ impl PartitionProvider for MockPartitionProvider {
|
|||
});
|
||||
|
||||
assert_eq!(p.namespace_id(), namespace_id);
|
||||
assert_eq!(p.namespace_name().to_string(), namespace_name.to_string());
|
||||
assert_eq!(p.table_name().to_string(), table_name.to_string());
|
||||
p
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@ use async_trait::async_trait;
|
|||
use data_types::{NamespaceId, PartitionKey, TableId};
|
||||
|
||||
use crate::{
|
||||
buffer_tree::{partition::PartitionData, table::TableName},
|
||||
buffer_tree::{namespace::NamespaceName, partition::PartitionData, table::TableName},
|
||||
deferred_load::DeferredLoad,
|
||||
};
|
||||
|
||||
|
@ -21,6 +21,7 @@ pub(crate) trait PartitionProvider: Send + Sync + Debug {
|
|||
&self,
|
||||
partition_key: PartitionKey,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
table_id: TableId,
|
||||
table_name: Arc<DeferredLoad<TableName>>,
|
||||
) -> PartitionData;
|
||||
|
@ -35,11 +36,18 @@ where
|
|||
&self,
|
||||
partition_key: PartitionKey,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
table_id: TableId,
|
||||
table_name: Arc<DeferredLoad<TableName>>,
|
||||
) -> PartitionData {
|
||||
(**self)
|
||||
.get_partition(partition_key, namespace_id, table_id, table_name)
|
||||
.get_partition(
|
||||
partition_key,
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
table_id,
|
||||
table_name,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
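The change above also threads the new `namespace_name` argument through the blanket `impl PartitionProvider for Arc<T>`, which simply delegates to the inner provider. For readers unfamiliar with that shape, here is a sketch of the same pattern on a hypothetical `Provider` trait; it assumes the `async_trait` crate, and none of the names below come from the codebase.

```rust
use std::sync::Arc;

use async_trait::async_trait;

#[async_trait]
trait Provider: Send + Sync {
    async fn get(&self, key: String) -> String;
}

// Blanket impl: an `Arc<T: Provider>` can be used wherever a `Provider` is
// expected, delegating through `(**self)` exactly like the hunk above.
#[async_trait]
impl<T> Provider for Arc<T>
where
    T: Provider,
{
    async fn get(&self, key: String) -> String {
        (**self).get(key).await
    }
}
```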
|
@ -57,6 +65,9 @@ mod tests {
|
|||
async fn test_arc_impl() {
|
||||
let key = PartitionKey::from("bananas");
|
||||
let namespace_id = NamespaceId::new(1234);
|
||||
let namespace_name = Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from("ns-platanos")
|
||||
}));
|
||||
let table_id = TableId::new(24);
|
||||
let table_name = Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from("platanos")
|
||||
|
@ -66,6 +77,7 @@ mod tests {
|
|||
partition,
|
||||
"bananas".into(),
|
||||
namespace_id,
|
||||
Arc::clone(&namespace_name),
|
||||
table_id,
|
||||
Arc::clone(&table_name),
|
||||
SortKeyState::Provided(None),
|
||||
|
@ -74,10 +86,17 @@ mod tests {
|
|||
let mock = Arc::new(MockPartitionProvider::default().with_partition(data));
|
||||
|
||||
let got = mock
|
||||
.get_partition(key, namespace_id, table_id, Arc::clone(&table_name))
|
||||
.get_partition(
|
||||
key,
|
||||
namespace_id,
|
||||
Arc::clone(&namespace_name),
|
||||
table_id,
|
||||
Arc::clone(&table_name),
|
||||
)
|
||||
.await;
|
||||
assert_eq!(got.partition_id(), partition);
|
||||
assert_eq!(got.namespace_id(), namespace_id);
|
||||
assert_eq!(got.namespace_name().to_string(), namespace_name.to_string());
|
||||
assert_eq!(got.table_name().to_string(), table_name.to_string());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -186,7 +186,9 @@ mod tests {
|
|||
use super::*;
|
||||
use crate::{
|
||||
buffer_tree::{
|
||||
namespace::{name_resolver::mock::MockNamespaceNameProvider, NamespaceData},
|
||||
namespace::{
|
||||
name_resolver::mock::MockNamespaceNameProvider, NamespaceData, NamespaceName,
|
||||
},
|
||||
partition::{resolver::mock::MockPartitionProvider, PartitionData, SortKeyState},
|
||||
table::{name_resolver::mock::MockTableNameProvider, TableName},
|
||||
},
|
||||
|
@ -211,6 +213,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("banana-split"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -330,6 +335,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -362,6 +370,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -372,6 +383,9 @@ mod tests {
|
|||
PartitionId::new(1),
|
||||
PartitionKey::from("p2"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -416,6 +430,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -426,7 +443,10 @@ mod tests {
|
|||
PartitionId::new(1),
|
||||
PartitionKey::from("p2"),
|
||||
NamespaceId::new(4321), // A different namespace ID.
|
||||
TableId::new(1234), // A different table ID.
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TableId::new(1234), // A different table ID.
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
})),
|
||||
|
@ -469,6 +489,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -479,6 +502,9 @@ mod tests {
|
|||
PartitionId::new(1),
|
||||
PartitionKey::from("p2"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TableId::new(1234), // A different table ID.
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -523,6 +549,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -569,6 +598,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -579,6 +611,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p2"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -652,6 +687,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -731,6 +769,9 @@ mod tests {
|
|||
PartitionId::new(0),
|
||||
PartitionKey::from("p1"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -741,6 +782,9 @@ mod tests {
|
|||
PartitionId::new(1),
|
||||
PartitionKey::from("p2"),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from(NAMESPACE_NAME)
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
|
|
@ -12,7 +12,10 @@ use parking_lot::{Mutex, RwLock};
|
|||
use schema::Projection;
|
||||
use trace::span::{Span, SpanRecorder};
|
||||
|
||||
use super::partition::{resolver::PartitionProvider, PartitionData};
|
||||
use super::{
|
||||
namespace::NamespaceName,
|
||||
partition::{resolver::PartitionProvider, PartitionData},
|
||||
};
|
||||
use crate::{
|
||||
arcmap::ArcMap,
|
||||
deferred_load::DeferredLoad,
|
||||
|
@ -102,6 +105,7 @@ pub(crate) struct TableData {
|
|||
|
||||
/// The catalog ID of the namespace this table is being populated from.
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
|
||||
/// An abstract constructor of [`PartitionData`] instances for a given
|
||||
/// `(key, table)` tuple.
|
||||
|
@ -126,12 +130,14 @@ impl TableData {
|
|||
table_id: TableId,
|
||||
table_name: DeferredLoad<TableName>,
|
||||
namespace_id: NamespaceId,
|
||||
namespace_name: Arc<DeferredLoad<NamespaceName>>,
|
||||
partition_provider: Arc<dyn PartitionProvider>,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_id,
|
||||
table_name: Arc::new(table_name),
|
||||
namespace_id,
|
||||
namespace_name,
|
||||
partition_data: Default::default(),
|
||||
partition_provider,
|
||||
}
|
||||
|
@ -154,6 +160,7 @@ impl TableData {
|
|||
.get_partition(
|
||||
partition_key.clone(),
|
||||
self.namespace_id,
|
||||
Arc::clone(&self.namespace_name),
|
||||
self.table_id,
|
||||
Arc::clone(&self.table_name),
|
||||
)
|
||||
|
@ -288,6 +295,9 @@ mod tests {
|
|||
PARTITION_ID,
|
||||
PARTITION_KEY.into(),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from("platanos")
|
||||
})),
|
||||
TABLE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
TableName::from(TABLE_NAME)
|
||||
|
@ -302,6 +312,9 @@ mod tests {
|
|||
TableName::from(TABLE_NAME)
|
||||
}),
|
||||
NAMESPACE_ID,
|
||||
Arc::new(DeferredLoad::new(Duration::from_secs(1), async {
|
||||
NamespaceName::from("platanos")
|
||||
})),
|
||||
partition_provider,
|
||||
);
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@ influxdb_influxql_parser = { path = "../influxdb_influxql_parser" }
|
|||
itertools = "0.10.5"
|
||||
object_store = "0.5.1"
|
||||
observability_deps = { path = "../observability_deps" }
|
||||
once_cell = { version = "1.16.0", features = ["parking_lot"] }
|
||||
parking_lot = "0.12"
|
||||
parquet_file = { path = "../parquet_file" }
|
||||
query_functions = { path = "../query_functions"}
|
||||
|
|
|
@ -71,6 +71,14 @@ impl DedicatedExecutors {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn new_testing() -> Self {
|
||||
Self {
|
||||
query_exec: DedicatedExecutor::new_testing(),
|
||||
reorg_exec: DedicatedExecutor::new_testing(),
|
||||
num_threads: 1,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num_threads(&self) -> usize {
|
||||
self.num_threads
|
||||
}
|
||||
|
@ -114,11 +122,23 @@ impl Executor {
|
|||
})
|
||||
}
|
||||
|
||||
/// Create new executor based on a specific config.
|
||||
pub fn new_with_config(config: ExecutorConfig) -> Self {
|
||||
let executors = Arc::new(DedicatedExecutors::new(config.num_threads));
|
||||
Self::new_with_config_and_executors(config, executors)
|
||||
}
|
||||
|
||||
/// Get testing executor.
|
||||
pub fn new_testing() -> Self {
|
||||
let config = ExecutorConfig {
|
||||
num_threads: 1,
|
||||
target_query_partitions: 1,
|
||||
object_stores: HashMap::default(),
|
||||
};
|
||||
let executors = Arc::new(DedicatedExecutors::new_testing());
|
||||
Self::new_with_config_and_executors(config, executors)
|
||||
}
|
||||
|
||||
/// Low-level constructor.
|
||||
///
|
||||
/// This is mostly useful if you want to keep the executors (because they are quite expensive to create) but need a fresh IOx runtime.
|
||||
|
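The test hunks below (and the earlier `compact`/ingester tests) replace `Executor::new(1)` with the `Executor::new_testing()` constructor added above. Here is a minimal sketch of a call site after the change; it assumes `iox_query::exec::{Executor, ExecutorType}` as the import path and a tokio test runtime, and elides the actual plan being executed.

```rust
use iox_query::exec::{Executor, ExecutorType};

#[tokio::test]
async fn uses_testing_executor() {
    // Previously: `let exec = Executor::new(1);` -- a full executor with one thread.
    // The testing constructor wires up `DedicatedExecutor::new_testing()` instead.
    let exec = Executor::new_testing();
    let ctx = exec.new_context(ExecutorType::Query);

    // ... build a plan and run it via `ctx`, e.g. `ctx.to_string_set(plan).await` ...
    drop(ctx);

    exec.join().await;
}
```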
@ -336,12 +356,10 @@ mod tests {
|
|||
let expected_strings = to_set(&["Foo", "Bar"]);
|
||||
let plan = StringSetPlan::Known(Arc::clone(&expected_strings));
|
||||
|
||||
let exec = Executor::new(1);
|
||||
let exec = Executor::new_testing();
|
||||
let ctx = exec.new_context(ExecutorType::Query);
let result_strings = ctx.to_string_set(plan).await.unwrap();
assert_eq!(result_strings, expected_strings);

exec.join().await;
}

#[tokio::test]

@ -351,13 +369,11 @@ mod tests {
let scan = make_plan(schema, vec![]);
let plan: StringSetPlan = vec![scan].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await.unwrap();

assert_eq!(results, StringSetRef::new(StringSet::new()));

exec.join().await;
}

#[tokio::test]

@ -369,13 +385,11 @@ mod tests {
let scan = make_plan(batch.schema(), vec![batch]);
let plan: StringSetPlan = vec![scan].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await.unwrap();

assert_eq!(results, to_set(&["foo", "bar", "baz"]));

exec.join().await;
}

#[tokio::test]

@ -391,13 +405,11 @@ mod tests {
let scan = make_plan(schema, vec![batch1, batch2]);
let plan: StringSetPlan = vec![scan].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await.unwrap();

assert_eq!(results, to_set(&["foo", "bar", "baz"]));

exec.join().await;
}

#[tokio::test]

@ -417,13 +429,11 @@ mod tests {

let plan: StringSetPlan = vec![scan1, scan2].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await.unwrap();

assert_eq!(results, to_set(&["foo", "bar", "baz"]));

exec.join().await;
}

#[tokio::test]

@ -438,7 +448,7 @@ mod tests {
let scan = make_plan(schema, vec![batch]);
let plan: StringSetPlan = vec![scan].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await;

@ -453,8 +463,6 @@ mod tests {
expected_error,
actual_error,
);

exec.join().await;
}

#[tokio::test]

@ -466,7 +474,7 @@ mod tests {
let scan = make_plan(batch.schema(), vec![batch]);
let plan: StringSetPlan = vec![scan].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await;

@ -482,8 +490,6 @@ mod tests {
expected_error,
actual_error
);

exec.join().await;
}

#[tokio::test]

@ -500,13 +506,11 @@ mod tests {
let pivot = make_schema_pivot(scan);
let plan = vec![pivot].into();

let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let results = ctx.to_string_set(plan).await.expect("Executed plan");

assert_eq!(results, to_set(&["f1", "f2"]));

exec.join().await;
}

/// return a set for testing

@ -46,7 +46,6 @@ use datafusion_util::config::{iox_session_config, DEFAULT_CATALOG};
use executor::DedicatedExecutor;
use futures::{Stream, StreamExt, TryStreamExt};
use observability_deps::tracing::debug;
use once_cell::sync::Lazy;
use query_functions::selectors::register_selector_aggregates;
use std::{convert::TryInto, fmt, sync::Arc};
use trace::{

@ -262,10 +261,6 @@ impl fmt::Debug for IOxSessionContext {
}
}

/// [`DedicatedExecutor`] for testing purposes.
static TESTING_EXECUTOR: Lazy<DedicatedExecutor> =
Lazy::new(|| DedicatedExecutor::new("testing", 1));

impl IOxSessionContext {
/// Constructor for testing.
///

@ -274,7 +269,7 @@ impl IOxSessionContext {
pub fn with_testing() -> Self {
Self {
inner: SessionContext::default(),
exec: TESTING_EXECUTOR.clone(),
exec: DedicatedExecutor::new_testing(),
recorder: SpanRecorder::default(),
}
}

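Note: throughout this PR, tests move from Executor::new(1) and a shared static TESTING_EXECUTOR to Executor::new_testing() / DedicatedExecutor::new_testing(). The real constructor lives in the executor crate and is not shown in this diff; the following is only a minimal, self-contained sketch of the pattern, with the stand-in type, field names, and the single-thread choice assumed rather than taken from the source.

// Sketch only: stand-in types, not the actual `executor` crate API.
#[derive(Clone, Debug)]
struct DedicatedExecutor {
    name: String,
    num_threads: usize,
}

impl DedicatedExecutor {
    fn new(name: &str, num_threads: usize) -> Self {
        Self {
            name: name.to_string(),
            num_threads,
        }
    }

    /// Cheap, on-demand constructor for tests (assumed to mirror `new_testing`).
    fn new_testing() -> Self {
        Self::new("testing", 1)
    }
}

fn main() {
    // Each test builds its own lightweight executor instead of cloning a global Lazy static.
    let exec = DedicatedExecutor::new_testing();
    assert_eq!(exec.num_threads, 1);
    println!("created executor {:?}", exec.name);
}
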
@ -413,8 +408,6 @@ impl IOxSessionContext {

/// Executes the SeriesSetPlans on the query executor, in
/// parallel, producing series or groups
///
/// TODO make this streaming rather than buffering the results
pub async fn to_series_and_groups(
&self,
series_set_plans: SeriesSetPlans,

@ -506,7 +499,10 @@ impl IOxSessionContext {
.await?
.into_fieldlist()
.map_err(|e| {
Error::Execution(format!("Error converting to field list: {}", e))
Error::Context(
"Error converting to field list".to_string(),
Box::new(Error::External(Box::new(e))),
)
})?;

Ok(field_list)

@ -529,9 +525,12 @@ impl IOxSessionContext {
}

// TODO: Stream this
results
.into_fieldlist()
.map_err(|e| Error::Execution(format!("Error converting to field list: {}", e)))
results.into_fieldlist().map_err(|e| {
Error::Context(
"Error converting to field list".to_string(),
Box::new(Error::External(Box::new(e))),
)
})
}

/// Executes this plan on the query pool, and returns the

@ -544,7 +543,12 @@ impl IOxSessionContext {
.run_logical_plans(plans)
.await?
.into_stringset()
.map_err(|e| Error::Execution(format!("Error converting to stringset: {}", e))),
.map_err(|e| {
Error::Context(
"Error converting to stringset".to_string(),
Box::new(Error::External(Box::new(e))),
)
}),
}
}

@ -592,9 +596,12 @@ impl IOxSessionContext {
Fut: std::future::Future<Output = Result<T>> + Send + 'static,
T: Send + 'static,
{
exec.spawn(fut)
.await
.unwrap_or_else(|e| Err(Error::Execution(format!("Join Error: {}", e))))
exec.spawn(fut).await.unwrap_or_else(|e| {
Err(Error::Context(
"Join Error".to_string(),
Box::new(Error::External(Box::new(e))),
))
})
}

/// Returns a IOxSessionContext with a SpanRecorder that is a child of the current

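Note: the error-handling change above replaces stringly-typed Error::Execution(format!(...)) wrappers with Error::Context(..., Box::new(Error::External(...))), which keeps the original error as a typed source. Below is a minimal sketch of that wrapping pattern, assuming Error is the module's alias for DataFusion's DataFusionError (the alias itself is not visible in this diff); the helper name and example error are illustrative only.

use datafusion::error::DataFusionError;

/// Wrap an arbitrary error with a context message while keeping it reachable as a source.
fn wrap_with_context(
    msg: &str,
    source: impl std::error::Error + Send + Sync + 'static,
) -> DataFusionError {
    DataFusionError::Context(
        msg.to_string(),
        Box::new(DataFusionError::External(Box::new(source))),
    )
}

fn main() {
    let io_err = std::io::Error::new(std::io::ErrorKind::Other, "disk full");
    let wrapped = wrap_with_context("Error converting to field list", io_err);
    // Displays the context message; the original error stays attached instead of
    // being flattened into a formatted string.
    println!("{wrapped}");
}
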
File diff suppressed because it is too large
@ -5,10 +5,10 @@ use std::{convert::TryFrom, fmt, sync::Arc};

use arrow::{
array::{
ArrayRef, BooleanArray, Float64Array, Int64Array, StringArray, TimestampNanosecondArray,
UInt64Array,
Array, ArrayRef, BooleanArray, Float64Array, Int64Array, StringArray,
TimestampNanosecondArray, UInt64Array,
},
bitmap::Bitmap,
compute,
datatypes::DataType as ArrowDataType,
};
use predicate::rpc_predicate::{FIELD_COLUMN_NAME, MEASUREMENT_COLUMN_NAME};

@ -146,51 +146,43 @@ impl TryFrom<SeriesSet> for Vec<Series> {
impl SeriesSet {
/// Returns true if the array is entirely null between start_row and
/// start_row+num_rows
fn is_all_null(arr: &ArrayRef, start_row: usize, num_rows: usize) -> bool {
let end_row = start_row + num_rows;
(start_row..end_row).all(|i| arr.is_null(i))
fn is_all_null(arr: &ArrayRef) -> bool {
arr.null_count() == arr.len()
}

pub fn is_timestamp_all_null(&self) -> bool {
let start_row = self.start_row;
let num_rows = self.num_rows;

self.field_indexes.iter().all(|field_index| {
let array = self.batch.column(field_index.timestamp_index);
Self::is_all_null(array, start_row, num_rows)
Self::is_all_null(array)
})
}

// Convert and append the values from a single field to a Series
// appended to `frames`
fn field_to_series(&self, index: &FieldIndex) -> Result<Option<Series>> {
let batch = &self.batch;
let batch = self.batch.slice(self.start_row, self.num_rows);
let schema = batch.schema();

let field = schema.field(index.value_index);
let array = batch.column(index.value_index);

let start_row = self.start_row;
let num_rows = self.num_rows;

// No values for this field are in the array so it does not
// contribute to a series.
if field.is_nullable() && Self::is_all_null(array, start_row, num_rows) {
if field.is_nullable() && Self::is_all_null(array) {
return Ok(None);
}

let tags = self.create_frame_tags(schema.field(index.value_index).name());

// Only take timestamps (and values) from the rows that have non
// null values for this field
let valid = array.data().null_bitmap();

let timestamps = batch
.column(index.timestamp_index)
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.unwrap()
.extract_values(start_row, num_rows, valid);
let timestamps = compute::nullif(
batch.column(index.timestamp_index),
&compute::is_null(array).expect("is_null"),
)
.expect("null handling")
.as_any()
.downcast_ref::<TimestampNanosecondArray>()
.unwrap()
.extract_values();

let data = match array.data_type() {
ArrowDataType::Utf8 => {

@ -198,7 +190,7 @@ impl SeriesSet {
.as_any()
.downcast_ref::<StringArray>()
.unwrap()
.extract_values(start_row, num_rows, valid);
.extract_values();
Data::StringPoints { timestamps, values }
}
ArrowDataType::Float64 => {

@ -206,7 +198,7 @@ impl SeriesSet {
.as_any()
.downcast_ref::<Float64Array>()
.unwrap()
.extract_values(start_row, num_rows, valid);
.extract_values();

Data::FloatPoints { timestamps, values }
}

@ -215,7 +207,7 @@ impl SeriesSet {
.as_any()
.downcast_ref::<Int64Array>()
.unwrap()
.extract_values(start_row, num_rows, valid);
.extract_values();
Data::IntegerPoints { timestamps, values }
}
ArrowDataType::UInt64 => {

@ -223,7 +215,7 @@ impl SeriesSet {
.as_any()
.downcast_ref::<UInt64Array>()
.unwrap()
.extract_values(start_row, num_rows, valid);
.extract_values();
Data::UnsignedPoints { timestamps, values }
}
ArrowDataType::Boolean => {

@ -231,7 +223,7 @@ impl SeriesSet {
.as_any()
.downcast_ref::<BooleanArray>()
.unwrap()
.extract_values(start_row, num_rows, valid);
.extract_values();
Data::BooleanPoints { timestamps, values }
}
_ => {

@ -345,47 +337,23 @@ fn fmt_strings(f: &mut fmt::Formatter<'_>, strings: &[Arc<str>]) -> fmt::Result
}

trait ExtractValues<T> {
/// Extracts num_rows of data starting from start_row as a vector,
/// Extracts rows as a vector,
/// for all rows `i` where `valid[i]` is set
fn extract_values(&self, start_row: usize, num_rows: usize, valid: Option<&Bitmap>) -> Vec<T>;
fn extract_values(&self) -> Vec<T>;
}

/// Implements extract_values for a particular type of array that
macro_rules! extract_values_impl {
($DATA_TYPE:ty) => {
fn extract_values(
&self,
start_row: usize,
num_rows: usize,
valid: Option<&Bitmap>,
) -> Vec<$DATA_TYPE> {
let end_row = start_row + num_rows;
match valid {
Some(valid) => (start_row..end_row)
.filter_map(|row| valid.is_set(row).then(|| self.value(row)))
.collect(),
None => (start_row..end_row).map(|row| self.value(row)).collect(),
}
fn extract_values(&self) -> Vec<$DATA_TYPE> {
self.iter().flatten().collect()
}
};
}

impl ExtractValues<String> for StringArray {
fn extract_values(
&self,
start_row: usize,
num_rows: usize,
valid: Option<&Bitmap>,
) -> Vec<String> {
let end_row = start_row + num_rows;
match valid {
Some(valid) => (start_row..end_row)
.filter_map(|row| valid.is_set(row).then(|| self.value(row).to_string()))
.collect(),
None => (start_row..end_row)
.map(|row| self.value(row).to_string())
.collect(),
}
fn extract_values(&self) -> Vec<String> {
self.iter().flatten().map(str::to_string).collect()
}
}

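Note: two simplifications drive the series.rs diff above. The batch is sliced to start_row..start_row + num_rows up front, so "all null" reduces to a null-count check, and per-type value extraction becomes plain iteration over the non-null entries. Below is a minimal, self-contained sketch of those two idioms using the arrow crate; the example arrays and values are made up for illustration.

use arrow::array::{Array, Int64Array, StringArray};

/// "All null" check after the batch has already been sliced to the rows of interest.
fn is_all_null(arr: &dyn Array) -> bool {
    arr.null_count() == arr.len()
}

fn main() {
    let ints = Int64Array::from(vec![Some(1), None, Some(3)]);
    let strings = StringArray::from(vec![None::<&str>, None, None]);

    assert!(!is_all_null(&ints));
    assert!(is_all_null(&strings));

    // `iter().flatten()` keeps only the non-null values, replacing the old
    // manual start_row..end_row loop over the validity bitmap.
    let values: Vec<i64> = ints.iter().flatten().collect();
    assert_eq!(values, vec![1, 3]);
}
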
@ -73,7 +73,7 @@ mod test {
let logical_plan = scan_plan.plan_builder.build().unwrap();

// Build physical plan
let executor = Executor::new(1);
let executor = Executor::new_testing();
let physical_plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&logical_plan)

@ -123,7 +123,7 @@ mod test {
let logical_plan = scan_plan.plan_builder.build().unwrap();

// Build physical plan
let executor = Executor::new(1);
let executor = Executor::new_testing();
let physical_plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&logical_plan)

@ -189,7 +189,7 @@ mod test {
let logical_plan = scan_plan.plan_builder.build().unwrap();

// Build physical plan
let executor = Executor::new(1);
let executor = Executor::new_testing();
let physical_plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&logical_plan)

@ -233,7 +233,7 @@ mod test {
.split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000])
.expect("created compact plan");

let executor = Executor::new(1);
let executor = Executor::new_testing();
let plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&split_plan)

@ -277,8 +277,6 @@ mod test {
.unwrap();

assert_extracted_metrics!(extracted, 8);

executor.join().await;
}

// Extracted baseline metrics for the specified operator

@ -640,7 +640,7 @@ impl InfluxRpcPlanner {
.with_predicate(predicate)
.build()?;

let tag_name_is_not_null = Expr::Column(tag_name.into()).is_not_null();
let tag_name_is_not_null = tag_name.as_expr().is_not_null();

// TODO: optimize this to use "DISINCT" or do
// something more intelligent that simply fetching all

@ -1431,10 +1431,7 @@ fn columns_in_predicates(
match &predicate.field_columns {
Some(field_columns) => {
for field in field_columns {
columns.insert(Column {
relation: None,
name: (*field).clone(),
});
columns.insert(Column::from_name(field));
}
}
None => {

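Note: Column::from_name builds an unqualified DataFusion column reference (no table relation), replacing the hand-rolled struct literal above. A small sketch of the equivalence, assuming the Column re-export at datafusion::common::Column and the field layout of the DataFusion revision pinned by this branch.

use datafusion::common::Column;

fn main() {
    // The verbose form spells out the fields that `from_name` fills in.
    let verbose = Column {
        relation: None,
        name: "i64_field".to_string(),
    };
    let concise = Column::from_name("i64_field");

    assert_eq!(verbose, concise);
    println!("{concise:?}");
}
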
@ -1950,7 +1947,7 @@ mod tests {
.with_one_row_of_data(),
);
// index of columns in the above chunk: [bar, foo, i64_field, i64_field_2, time]
let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));
let table = "h2o";

@ -2033,7 +2030,7 @@ mod tests {
.with_one_row_of_data(),
);

let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));
let ctx = test_db.new_query_context(None);

@ -2068,7 +2065,6 @@ mod tests {
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME);
executor.join().await;

////////////////////////////
// Test 2: no need_fields

@ -2096,7 +2092,6 @@ mod tests {
assert_eq!(chunk_schema.field(1).1.name(), "foo");
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
executor.join().await;
}

#[tokio::test]

@ -2112,7 +2107,7 @@ mod tests {
.with_one_row_of_data(),
);

let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));
let ctx = test_db.new_query_context(None);

@ -2144,7 +2139,6 @@ mod tests {
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME);
executor.join().await;

/////////////
// Test 2: empty predicate without need_fields

@ -2166,7 +2160,6 @@ mod tests {
let chunk = &result[0].2[0];
let chunk_schema = (*chunk.schema()).clone();
assert_eq!(chunk_schema.len(), 5);
executor.join().await;
}

#[tokio::test]

@ -2181,7 +2174,7 @@ mod tests {
.with_time_column(), // no row added for this chunk on purpose
);

let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));
let ctx = test_db.new_query_context(None);

@ -2212,7 +2205,6 @@ mod tests {
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME);
executor.join().await;
}

#[tokio::test]

@ -2228,7 +2220,7 @@ mod tests {
.with_one_row_of_data(),
);

let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));
let ctx = test_db.new_query_context(None);

@ -2262,7 +2254,6 @@ mod tests {
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME);
executor.join().await;

/////////////
// Test 2: predicate on tag `foo` and `field_columns` is not empty

@ -2295,7 +2286,6 @@ mod tests {
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME);
executor.join().await;
}

#[tokio::test]

@ -2311,7 +2301,7 @@ mod tests {
.with_one_row_of_data(),
);

let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));
let ctx = test_db.new_query_context(None);

@ -2342,7 +2332,6 @@ mod tests {
assert_eq!(chunk_schema.field(2).1.name(), "i64_field");
assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2");
assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME);
executor.join().await;
}

#[tokio::test]

@ -2529,7 +2518,7 @@ mod tests {
.with_time_column(),
);

let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));

@ -2545,7 +2534,5 @@ mod tests {
"\nActual: {:?}\nExpected: {:?}",
actual_predicate, expected_predicate
);

executor.join().await;
}
}

@ -325,7 +325,7 @@ mod test {
];

// executor has only 1 thread
let executor = Executor::new(1);
let executor = Executor::new_testing();
for chunks in chunk_orders {
let sort_key = SortKeyBuilder::with_capacity(2)
.with_col_opts("tag1", false, true)

@ -356,8 +356,6 @@ mod test {

assert_batches_eq!(&expected, &batches);
}

executor.join().await;
}

#[tokio::test]

@ -375,7 +373,7 @@ mod test {
.compact_plan(Arc::from("t"), schema, chunks, sort_key)
.expect("created compact plan");

let executor = Executor::new(1);
let executor = Executor::new_testing();
let physical_plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&compact_plan)

@ -407,8 +405,6 @@ mod test {
];

assert_batches_eq!(&expected, &batches);

executor.join().await;
}

#[tokio::test]

@ -428,7 +424,7 @@ mod test {
.split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000])
.expect("created compact plan");

let executor = Executor::new(1);
let executor = Executor::new_testing();
let physical_plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&split_plan)

@ -473,8 +469,6 @@ mod test {
];

assert_batches_eq!(&expected, &batches1);

executor.join().await;
}

#[tokio::test]

@ -494,7 +488,7 @@ mod test {
.split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000, 7000])
.expect("created compact plan");

let executor = Executor::new(1);
let executor = Executor::new_testing();
let physical_plan = executor
.new_context(ExecutorType::Reorg)
.create_physical_plan(&split_plan)

@ -550,8 +544,6 @@ mod test {
"+-----------+------------+------+-----------------------------+",
];
assert_batches_eq!(&expected, &batches2);

executor.join().await;
}

#[tokio::test]

@ -79,7 +79,7 @@ mod test {
.with_one_row_of_data(),
);
// index of columns in the above chunk: [bar, foo, i64_field, i64_field_2, time]
let executor = Arc::new(Executor::new(1));
let executor = Arc::new(Executor::new_testing());
let ctx = executor.new_context(ExecutorType::Query);
let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor)));
test_db.add_chunk("my_partition_key", Arc::clone(&chunk0));

@ -221,12 +221,10 @@ mod tests {
let expected_ss = to_string_set(&["foo", "bar", "baz", "from_a_plan"]).into();

assert!(matches!(plan, StringSetPlan::Plan(_)));
let exec = Executor::new(1);
let exec = Executor::new_testing();
let ctx = exec.new_context(ExecutorType::Query);
let ss = ctx.to_string_set(plan).await.unwrap();
assert_eq!(ss, expected_ss);

exec.join().await;
}

fn to_string_set(v: &[&str]) -> StringSet {

@ -1371,7 +1371,9 @@ mod test {
use super::*;
use crate::test::{raw_data, TestChunk};
use arrow::datatypes::DataType;
use arrow_util::{assert_batches_eq, assert_batches_sorted_eq};
use arrow_util::{
assert_batches_eq, assert_batches_sorted_eq, test_util::equalize_batch_schemas,
};
use datafusion::physical_plan::displayable;
use datafusion_util::test_collect;
use schema::{builder::SchemaBuilder, TIME_COLUMN_NAME};

@ -2170,27 +2172,30 @@ mod test {
let chunks = vec![chunk1, chunk2, chunk3];
// data in its original form
let expected = vec![
"+-----------+------+--------------------------------+--------------------------------+",
"| field_int | tag1 | tag2 | time |",
"+-----------+------+--------------------------------+--------------------------------+",
"| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
"| 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
"| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
"| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
"| 5 | MT | AL | 1970-01-01T00:00:00.000005Z |",
"| 1000 | MT | 1970-01-01T00:00:00.000001Z | |",
"| 10 | MT | 1970-01-01T00:00:00.000007Z | |",
"| 70 | CT | 1970-01-01T00:00:00.000000100Z | |",
"| 100 | AL | 1970-01-01T00:00:00.000000050Z | |",
"| 5 | MT | 1970-01-01T00:00:00.000005Z | |",
"| 1000 | MT | 1970-01-01T00:00:00.000001Z | |",
"| 10 | MT | 1970-01-01T00:00:00.000007Z | |",
"| 70 | CT | 1970-01-01T00:00:00.000000100Z | |",
"| 100 | AL | 1970-01-01T00:00:00.000000050Z | |",
"| 5 | MT | 1970-01-01T00:00:00.000005Z | |",
"+-----------+------+--------------------------------+--------------------------------+",
"+-----------+------+------+--------------------------------+-----------------+",
"| field_int | tag1 | tag2 | time | other_field_int |",
"+-----------+------+------+--------------------------------+-----------------+",
"| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z | |",
"| 10 | MT | AL | 1970-01-01T00:00:00.000007Z | |",
"| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z | |",
"| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z | |",
"| 5 | MT | AL | 1970-01-01T00:00:00.000005Z | |",
"| | MT | | 1970-01-01T00:00:00.000001Z | 1000 |",
"| | MT | | 1970-01-01T00:00:00.000007Z | 10 |",
"| | CT | | 1970-01-01T00:00:00.000000100Z | 70 |",
"| | AL | | 1970-01-01T00:00:00.000000050Z | 100 |",
"| | MT | | 1970-01-01T00:00:00.000005Z | 5 |",
"| | MT | | 1970-01-01T00:00:00.000001Z | 1000 |",
"| | MT | | 1970-01-01T00:00:00.000007Z | 10 |",
"| | CT | | 1970-01-01T00:00:00.000000100Z | 70 |",
"| | AL | | 1970-01-01T00:00:00.000000050Z | 100 |",
"| | MT | | 1970-01-01T00:00:00.000005Z | 5 |",
"+-----------+------+------+--------------------------------+-----------------+",
];
assert_batches_eq!(&expected, &raw_data(&chunks).await);
assert_batches_eq!(
&expected,
&equalize_batch_schemas(raw_data(&chunks).await).unwrap()
);

// request just the fields
let schema = SchemaBuilder::new()

@ -2283,27 +2288,30 @@ mod test {
let chunks = vec![chunk1, chunk2, chunk3];
// data in its original form
let expected = vec![
"+-----------+------+------+--------------------------------+",
"| field_int | tag1 | tag2 | time |",
"+-----------+------+------+--------------------------------+",
"| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
"| 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
"| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
"| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
"| 5 | MT | AL | 1970-01-01T00:00:00.000005Z |",
"| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
"| 10 | MT | MT | 1970-01-01T00:00:00.000007Z |",
"| 70 | CT | AL | 1970-01-01T00:00:00.000000100Z |",
"| 100 | AL | AL | 1970-01-01T00:00:00.000000050Z |",
"| 5 | MT | MT | 1970-01-01T00:00:00.000005Z |",
"| 1000 | 1000 | CT | 1970-01-01T00:00:00.000001Z |",
"| 10 | 10 | MT | 1970-01-01T00:00:00.000007Z |",
"| 70 | 70 | AL | 1970-01-01T00:00:00.000000100Z |",
"| 100 | 100 | AL | 1970-01-01T00:00:00.000000050Z |",
"| 5 | 5 | MT | 1970-01-01T00:00:00.000005Z |",
"+-----------+------+------+--------------------------------+",
"+-----------+------+------+--------------------------------+------+------------+",
"| field_int | tag1 | tag2 | time | tag3 | field_int2 |",
"+-----------+------+------+--------------------------------+------+------------+",
"| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z | | |",
"| 10 | MT | AL | 1970-01-01T00:00:00.000007Z | | |",
"| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z | | |",
"| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z | | |",
"| 5 | MT | AL | 1970-01-01T00:00:00.000005Z | | |",
"| 1000 | MT | | 1970-01-01T00:00:00.000001Z | CT | |",
"| 10 | MT | | 1970-01-01T00:00:00.000007Z | MT | |",
"| 70 | CT | | 1970-01-01T00:00:00.000000100Z | AL | |",
"| 100 | AL | | 1970-01-01T00:00:00.000000050Z | AL | |",
"| 5 | MT | | 1970-01-01T00:00:00.000005Z | MT | |",
"| 1000 | | | 1970-01-01T00:00:00.000001Z | CT | 1000 |",
"| 10 | | | 1970-01-01T00:00:00.000007Z | MT | 10 |",
"| 70 | | | 1970-01-01T00:00:00.000000100Z | AL | 70 |",
"| 100 | | | 1970-01-01T00:00:00.000000050Z | AL | 100 |",
"| 5 | | | 1970-01-01T00:00:00.000005Z | MT | 5 |",
"+-----------+------+------+--------------------------------+------+------------+",
];
assert_batches_eq!(&expected, &raw_data(&chunks).await);
assert_batches_eq!(
&expected,
&equalize_batch_schemas(raw_data(&chunks).await).unwrap()
);

let output_sort_key = SortKey::from_columns(vec!["tag2", "tag1", "time"]);
let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks(

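Note: the expected tables above now include extra all-null columns, and the assertions wrap raw_data(&chunks) in equalize_batch_schemas. That helper lives in arrow_util::test_util and is not shown in this diff; the sketch below only illustrates the assumed idea of padding a RecordBatch with a null column so batches with differing schemas can be printed and compared side by side. The helper name, its behavior, and the arrow API shapes (of the version pinned by this branch) are assumptions.

use std::sync::Arc;

use arrow::{
    array::{new_null_array, Array, ArrayRef, Int64Array},
    datatypes::{DataType, Field, Schema},
    record_batch::RecordBatch,
};

/// Append an all-null column to a batch (illustrative stand-in, not the real helper).
fn pad_with_null_column(
    batch: &RecordBatch,
    name: &str,
    data_type: DataType,
) -> Result<RecordBatch, arrow::error::ArrowError> {
    let schema = batch.schema();
    let mut fields: Vec<Field> = schema.fields().clone();
    fields.push(Field::new(name, data_type.clone(), true));

    let mut columns: Vec<ArrayRef> = batch.columns().to_vec();
    columns.push(new_null_array(&data_type, batch.num_rows()));

    RecordBatch::try_new(Arc::new(Schema::new(fields)), columns)
}

fn main() {
    let field_int: ArrayRef = Arc::new(Int64Array::from(vec![1000, 10]));
    let schema = Arc::new(Schema::new(vec![Field::new(
        "field_int",
        DataType::Int64,
        false,
    )]));
    let batch = RecordBatch::try_new(schema, vec![field_int]).unwrap();

    let padded = pad_with_null_column(&batch, "other_field_int", DataType::Int64).unwrap();
    assert_eq!(padded.num_columns(), 2);
    // The padded column renders as empty cells, matching the expected tables above.
    assert_eq!(padded.column(1).null_count(), 2);
}
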
@ -214,6 +214,7 @@ pub async fn build_compactor_from_config(
minutes_without_new_writes_to_be_cold,
hot_compaction_hours_threshold_1,
hot_compaction_hours_threshold_2,
max_parallel_partitions,
..
} = compactor_config;

@ -231,6 +232,7 @@ pub async fn build_compactor_from_config(
minutes_without_new_writes_to_be_cold,
hot_compaction_hours_threshold_1,
hot_compaction_hours_threshold_2,
max_parallel_partitions,
};

Ok(compactor::compact::Compactor::new(

@ -196,7 +196,7 @@ mod tests {
let object_store = Arc::new(InMemory::new()) as _;

let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0)));
let exec = Arc::new(Executor::new(1));
let exec = Arc::new(Executor::new_testing());
let catalog_cache = Arc::new(CatalogCache::new_testing(
Arc::clone(&catalog),
time_provider,

@ -1,6 +1,5 @@
//! Routines for error handling

use datafusion::{arrow::error::ArrowError, error::DataFusionError};
use datafusion::error::DataFusionError;

/// Converts a [`DataFusionError`] into the appropriate [`tonic::Code`]
///

@ -21,27 +20,8 @@ use datafusion::{arrow::error::ArrowError, error::DataFusionError};
/// Basically because I wasn't sure they were all internal errors --
/// for example, you can get an Arrow error if you try and divide a
/// column by zero, depending on the data.
pub fn datafusion_error_to_tonic_code(mut e: &DataFusionError) -> tonic::Code {
// traverse potential error chains
loop {
// traverse context chain without recursion
if let DataFusionError::Context(_msg, inner) = e {
e = inner;
continue;
}

// The Arrow error may itself contain a datafusion error again
// See https://github.com/apache/arrow-datafusion/issues/4172
if let DataFusionError::ArrowError(ArrowError::ExternalError(inner)) = e {
if let Some(inner) = inner.downcast_ref::<DataFusionError>() {
e = inner;
continue;
}
}

// no more traversal
break;
}
pub fn datafusion_error_to_tonic_code(e: &DataFusionError) -> tonic::Code {
let e = e.find_root();

match e {
DataFusionError::ResourcesExhausted(_) => tonic::Code::ResourceExhausted,

@ -56,10 +36,6 @@ pub fn datafusion_error_to_tonic_code(mut e: &DataFusionError) -> tonic::Code {
// Since we are not sure they are all internal errors we
// classify them as InvalidArgument so the user has a chance
// to see them
//
// Potential future TODO: we could inspect the error and
// decide. e.g. For Box<dyn ...> we could downcast the type
// if IOx only puts a single concrete enum in there.
| DataFusionError::Execution(_)
| DataFusionError::ArrowError(_)
| DataFusionError::ParquetError(_)

@ -104,37 +80,36 @@ mod test {
let s = "foo".to_string();

// this is basically a second implementation of the translation table to help avoid mistakes
do_test(
do_transl_test(
DataFusionError::ResourcesExhausted(s.clone()),
tonic::Code::ResourceExhausted,
);

let e = ParserError::ParserError(s.clone());
do_test(DataFusionError::SQL(e), tonic::Code::InvalidArgument);
do_transl_test(DataFusionError::SQL(e), tonic::Code::InvalidArgument);

do_test(
do_transl_test(
DataFusionError::NotImplemented(s.clone()),
tonic::Code::InvalidArgument,
);
do_test(
do_transl_test(
DataFusionError::Plan(s.clone()),
tonic::Code::InvalidArgument,
);

do_test(DataFusionError::Internal(s), tonic::Code::Internal);
}
do_transl_test(DataFusionError::Internal(s), tonic::Code::Internal);

#[test]
fn test_error_context_traversal() {
let inner_error = DataFusionError::ResourcesExhausted("foo".to_string());

do_test(
DataFusionError::Context("it happened!".to_string(), Box::new(inner_error)),
// traversal
do_transl_test(
DataFusionError::Context(
"it happened!".to_string(),
Box::new(DataFusionError::ResourcesExhausted("foo".to_string())),
),
tonic::Code::ResourceExhausted,
);
}

fn do_test(e: DataFusionError, code: tonic::Code) {
fn do_transl_test(e: DataFusionError, code: tonic::Code) {
assert_eq!(datafusion_error_to_tonic_code(&e), code);
}
}

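Note: the rewrite above replaces the hand-rolled loop that peeled off Context layers and nested Arrow wrappers with DataFusion's DataFusionError::find_root(). Below is a reduced sketch of how the mapping behaves once the root cause is located; the match arms are a stand-in for the full translation table, and the handler usage at the end is only an assumption about how callers surface the code.

use datafusion::error::DataFusionError;

/// Reduced stand-in for the translation table: classify by the innermost cause.
fn to_tonic_code(e: &DataFusionError) -> tonic::Code {
    match e.find_root() {
        DataFusionError::ResourcesExhausted(_) => tonic::Code::ResourceExhausted,
        DataFusionError::Plan(_) | DataFusionError::NotImplemented(_) => {
            tonic::Code::InvalidArgument
        }
        _ => tonic::Code::Internal,
    }
}

fn main() {
    let err = DataFusionError::Context(
        "while running the query".to_string(),
        Box::new(DataFusionError::ResourcesExhausted("out of memory".to_string())),
    );
    // The outer Context layer is ignored; the root cause picks the code.
    assert_eq!(to_tonic_code(&err), tonic::Code::ResourceExhausted);

    // A gRPC handler would typically surface this as a status.
    let status = tonic::Status::new(to_tonic_code(&err), err.to_string());
    println!("{status:?}");
}
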
@ -31,7 +31,7 @@ impl TestDatabaseStore {
));
Self {
databases: Mutex::new(BTreeMap::new()),
executor: Arc::new(Executor::new(1)),
executor: Arc::new(Executor::new_testing()),
metric_registry,
query_semaphore: Arc::new(semaphore_metrics.new_semaphore(semaphore_size)),
}

@ -25,7 +25,7 @@ bytes = { version = "1", features = ["std"] }
chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
crossbeam-utils = { version = "0.8", features = ["std"] }
crypto-common = { version = "0.1", default-features = false, features = ["std"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "fdc83e8524df30ac5d0ae097572b7c48dc686ba9", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "799dd747152f6574638a844986b8ea8470d3f4d6", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
either = { version = "1", features = ["use_std"] }
fixedbitset = { version = "0.4", features = ["std"] }