Merge branch 'main' into dom/wal-bench
commit 9e8ae1485f

@@ -225,6 +225,43 @@ jobs:
              echo "No changes to commit"
            fi

  test_rpc_write:
    # setup multiple docker images (see https://circleci.com/docs/2.0/configuration-reference/#docker)
    docker:
      - image: quay.io/influxdb/rust:ci
      - image: vectorized/redpanda:v22.1.5
        command: redpanda start --overprovisioned --smp 1 --memory 1G --reserve-memory 0M
      - image: postgres
        environment:
          POSTGRES_HOST_AUTH_METHOD: trust
    resource_class: 2xlarge+ # use of a smaller executor tends to crash on link
    environment:
      # Disable incremental compilation to avoid overhead. We are not preserving these files anyway.
      CARGO_INCREMENTAL: "0"
      # Disable full debug symbol generation to speed up CI build
      # "1" means line tables only, which is useful for panic tracebacks.
      RUSTFLAGS: "-C debuginfo=1"
      # https://github.com/rust-lang/cargo/issues/10280
      CARGO_NET_GIT_FETCH_WITH_CLI: "true"
      RUST_BACKTRACE: "1"
      # Run integration tests
      TEST_INTEGRATION: 1
      INFLUXDB_IOX_INTEGRATION_LOCAL: 1
      KAFKA_CONNECT: "localhost:9092"
      POSTGRES_USER: postgres
      TEST_INFLUXDB_IOX_CATALOG_DSN: "postgres://postgres@localhost/iox_shared"
      # When removing this, also remove the ignore on the test in trogging/src/cli.rs
      RUST_LOG: debug,,hyper::proto::h1=info,h2=info
      LOG_FILTER: debug,,hyper::proto::h1=info,h2=info
    steps:
      - checkout
      - rust_components
      - cache_restore
      - run:
          name: Cargo test RPC write path
          command: cargo test --workspace --features rpc_write
      - cache_save

  test:
    # setup multiple docker images (see https://circleci.com/docs/2.0/configuration-reference/#docker)
    docker:

@@ -553,6 +590,7 @@ workflows:
          - protobuf-lint
          - docs-lint
          - test
          - test_rpc_write
          - test_heappy
          - build_dev
          - doc

@@ -572,6 +610,7 @@ workflows:
          - protobuf-lint
          - docs-lint
          - test
          - test_rpc_write
          - test_heappy
          - build_dev
          - build_release

@@ -9,5 +9,5 @@ indent_style = space
[{Dockerfile*,*.proto}]
indent_size = 2

[{*.rs,*.toml}]
[{*.rs,*.toml,*.sh,*.bash}]
indent_size = 4

@@ -1244,7 +1244,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "ahash 0.8.2",
 "arrow",

@@ -1276,7 +1276,7 @@ dependencies = [
 "pin-project-lite",
 "rand",
 "smallvec",
 "sqlparser 0.27.0",
 "sqlparser",
 "tempfile",
 "tokio",
 "tokio-stream",

@@ -1289,31 +1289,31 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "arrow",
 "chrono",
 "object_store",
 "parquet",
 "sqlparser 0.27.0",
 "sqlparser",
]

[[package]]
name = "datafusion-expr"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "ahash 0.8.2",
 "arrow",
 "datafusion-common",
 "log",
 "sqlparser 0.27.0",
 "sqlparser",
]

[[package]]
name = "datafusion-optimizer"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "arrow",
 "async-trait",

@@ -1328,7 +1328,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "ahash 0.8.2",
 "arrow",

@@ -1357,7 +1357,7 @@ dependencies = [
[[package]]
name = "datafusion-proto"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "arrow",
 "chrono",

@@ -1374,7 +1374,7 @@ dependencies = [
[[package]]
name = "datafusion-row"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "arrow",
 "datafusion-common",

@@ -1385,13 +1385,13 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "15.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=f2eb3b2bebffe75df06f3e55f2413728e7e19f2c#f2eb3b2bebffe75df06f3e55f2413728e7e19f2c"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145"
dependencies = [
 "arrow-schema",
 "datafusion-common",
 "datafusion-expr",
 "log",
 "sqlparser 0.27.0",
 "sqlparser",
]

[[package]]

@@ -1539,6 +1539,18 @@ dependencies = [
 "str-buf",
]

[[package]]
name = "escargot"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5584ba17d7ab26a8a7284f13e5bd196294dd2f2d79773cff29b9e9edef601a6"
dependencies = [
 "log",
 "once_cell",
 "serde",
 "serde_json",
]

[[package]]
name = "event-listener"
version = "2.5.3"

@@ -2412,7 +2424,7 @@ version = "0.1.0"
dependencies = [
 "generated_types",
 "snafu",
 "sqlparser 0.28.0",
 "sqlparser",
 "workspace-hack",
]

@@ -3730,9 +3742,9 @@ dependencies = [

[[package]]
name = "paste"
version = "1.0.9"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
checksum = "cf1c2c742266c2f1041c914ba65355a83ae8747b05f208319784083583494b4b"

[[package]]
name = "pbjson"

@@ -3948,7 +3960,7 @@ dependencies = [
 "query_functions",
 "schema",
 "snafu",
 "sqlparser 0.28.0",
 "sqlparser",
 "test_helpers",
 "workspace-hack",
]

@@ -5083,15 +5095,6 @@ dependencies = [
 "unicode_categories",
]

[[package]]
name = "sqlparser"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aba319938d4bfe250a769ac88278b629701024fe16f34257f9563bc628081970"
dependencies = [
 "log",
]

[[package]]
name = "sqlparser"
version = "0.28.0"

@@ -5405,6 +5408,7 @@ dependencies = [
 "assert_cmd",
 "bytes",
 "data_types",
 "escargot",
 "futures",
 "generated_types",
 "http",

@@ -114,8 +114,8 @@ license = "MIT OR Apache-2.0"
[workspace.dependencies]
arrow = { version = "28.0.0" }
arrow-flight = { version = "28.0.0" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="f2eb3b2bebffe75df06f3e55f2413728e7e19f2c", default-features = false }
datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="f2eb3b2bebffe75df06f3e55f2413728e7e19f2c" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="d33457c20c2b15d6a934e5b37ac9eb0d17e29145", default-features = false }
datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="d33457c20c2b15d6a934e5b37ac9eb0d17e29145" }
hashbrown = { version = "0.13.1" }
parquet = { version = "28.0.0" }

@@ -1,14 +1,8 @@
//! Querier-related configs.
use data_types::{IngesterMapping, ShardIndex};
use snafu::Snafu;
use std::{collections::HashMap, io, path::PathBuf, sync::Arc};

#[cfg(not(feature = "rpc_write"))]
use serde::Deserialize;
#[cfg(not(feature = "rpc_write"))]
use snafu::ResultExt;
#[cfg(not(feature = "rpc_write"))]
use std::fs;
use snafu::{ResultExt, Snafu};
use std::{collections::HashMap, fs, io, path::PathBuf, sync::Arc};

#[derive(Debug, Snafu)]
#[allow(missing_docs)]

@@ -25,7 +19,6 @@ pub enum Error {
        ingesters,
        shards,
    ))]
    #[cfg(not(feature = "rpc_write"))]
    IgnoreAllRequiresEmptyConfig {
        ingesters: HashMap<Arc<str>, Arc<IngesterConfig>>,
        shards: HashMap<ShardIndex, ShardConfig>,

@@ -137,7 +130,6 @@ pub struct QuerierConfig {
        env = "INFLUXDB_IOX_SHARD_TO_INGESTERS_FILE",
        action
    )]
    #[cfg(not(feature = "rpc_write"))]
    pub shard_to_ingesters_file: Option<PathBuf>,

    /// JSON containing a Shard index to ingesters gRPC mapping. For example:

@@ -207,7 +199,6 @@ pub struct QuerierConfig {
        env = "INFLUXDB_IOX_SHARD_TO_INGESTERS",
        action
    )]
    #[cfg(not(feature = "rpc_write"))]
    pub shard_to_ingesters: Option<String>,

    /// gRPC address for the router to talk with the ingesters. For

@@ -220,11 +211,7 @@ pub struct QuerierConfig {
    /// "http://10.10.10.1:8083,http://10.10.10.2:8083"
    ///
    /// for multiple addresses.
    #[clap(
        long = "ingester-addresses",
        env = "INFLUXDB_IOX_INGESTER_ADDRESSES",
        required = true
    )]
    #[clap(long = "ingester-addresses", env = "INFLUXDB_IOX_INGESTER_ADDRESSES")]
    #[cfg(feature = "rpc_write")]
    pub ingester_addresses: Vec<String>,

@@ -311,21 +298,45 @@ impl QuerierConfig {
        }
    }

    /// Return the querier config's ingester addresses.
    /// Return the querier config's ingester addresses. If `--shard-to-ingesters-file` is used to
    /// specify a JSON file containing shard to ingester address mappings, this returns `Err` if
    /// there are any problems reading, deserializing, or interpreting the file.

    // When we have switched to using the RPC write path and remove the rpc_write feature, this
    // method can be changed to be infallible as clap will handle failure to parse the list of
    // strings.
    //
    // For now, to enable turning on the `rpc_write` feature in tests but not necessarily switching
    // into the RPC write path mode, require *both* the feature flag to be enabled *and*
    // `--ingester-addresses` to be set in order to switch. If the `rpc_write` feature is enabled
    // and `--shard-to-ingesters*` are set, use the write buffer path instead.
    #[cfg(feature = "rpc_write")]
    pub fn ingester_addresses(&self) -> Result<IngesterAddresses, Error> {
        if self.ingester_addresses.is_empty() {
            Ok(IngesterAddresses::None)
        } else {
        if let Some(file) = &self.shard_to_ingesters_file {
            let contents =
                fs::read_to_string(file).context(ShardToIngesterFileReadingSnafu { file })?;
            let map = deserialize_shard_ingester_map(&contents)?;
            if map.is_empty() {
                Ok(IngesterAddresses::None)
            } else {
                Ok(IngesterAddresses::ByShardIndex(map))
            }
        } else if let Some(contents) = &self.shard_to_ingesters {
            let map = deserialize_shard_ingester_map(contents)?;
            if map.is_empty() {
                Ok(IngesterAddresses::None)
            } else {
                Ok(IngesterAddresses::ByShardIndex(map))
            }
        } else if !self.ingester_addresses.is_empty() {
            Ok(IngesterAddresses::List(
                self.ingester_addresses
                    .iter()
                    .map(|s| s.as_str().into())
                    .collect(),
            ))
        } else {
            Ok(IngesterAddresses::None)
        }
    }

@@ -357,7 +368,6 @@ impl QuerierConfig {
    }
}

#[cfg(not(feature = "rpc_write"))]
fn deserialize_shard_ingester_map(
    contents: &str,
) -> Result<HashMap<ShardIndex, IngesterMapping>, Error> {

@@ -443,7 +453,6 @@ pub enum IngesterAddresses {

#[derive(Debug, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[cfg(not(feature = "rpc_write"))]
struct IngestersConfig {
    #[serde(default)]
    ignore_all: bool,

@@ -455,7 +464,6 @@ struct IngestersConfig {

/// Ingester config.
#[derive(Debug, Deserialize)]
#[cfg(not(feature = "rpc_write"))]
pub struct IngesterConfig {
    addr: Option<Arc<str>>,
    #[serde(default)]

@@ -464,7 +472,6 @@ pub struct IngesterConfig {

/// Shard config.
#[derive(Debug, Deserialize)]
#[cfg(not(feature = "rpc_write"))]
pub struct ShardConfig {
    ingester: Option<Arc<str>>,
    #[serde(default)]

@@ -472,7 +479,6 @@ pub struct ShardConfig {
}

#[cfg(test)]
#[cfg(not(feature = "rpc_write"))] // These tests won't be relevant after the switch to rpc_write.
mod tests {
    use super::*;
    use clap::Parser;

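The doc comment and the implementation note above spell out a precedence order for ingester connectivity: a shard-to-ingester mapping (file or inline JSON) wins over a plain `--ingester-addresses` list, and an empty configuration falls back to no ingester connections at all. As a standalone sketch of just that decision logic (simplified, hypothetical types; `resolve` and its parameters are illustrative and not part of the crate):

// Hypothetical, self-contained reduction of the branch structure in
// `QuerierConfig::ingester_addresses` above. `IngesterAddresses` is cut down
// to a toy enum and JSON deserialization is replaced by a comma count.
#[derive(Debug, PartialEq)]
enum IngesterAddresses {
    ByShardIndex(usize), // stand-in for HashMap<ShardIndex, IngesterMapping>
    List(Vec<String>),
    None,
}

fn resolve(
    shard_map_file: Option<&str>,   // contents of --shard-to-ingesters-file
    shard_map_inline: Option<&str>, // --shard-to-ingesters JSON string
    addresses: &[String],           // --ingester-addresses
) -> IngesterAddresses {
    // Mapping sources take precedence over the plain address list.
    if let Some(contents) = shard_map_file.or(shard_map_inline) {
        let entries = contents.split(',').filter(|s| !s.is_empty()).count();
        if entries == 0 {
            // An empty mapping degrades to None rather than an error,
            // mirroring the `map.is_empty()` branches in the diff above.
            IngesterAddresses::None
        } else {
            IngesterAddresses::ByShardIndex(entries)
        }
    } else if !addresses.is_empty() {
        IngesterAddresses::List(addresses.to_vec())
    } else {
        IngesterAddresses::None
    }
}

fn main() {
    // The address list is consulted only when no shard mapping is configured.
    let addrs = vec!["http://10.10.10.1:8083".to_string()];
    assert_eq!(resolve(None, None, &addrs), IngesterAddresses::List(addrs.clone()));
    // With everything unset, the querier runs without ingester connections.
    assert_eq!(resolve(None, None, &[]), IngesterAddresses::None);
}
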
@@ -81,6 +81,11 @@ message IngesterQueryResponseMetadata {
  //
  // This field is currently NOT used by the ingester but will be soon.
  PartitionStatus status = 8;

  // UUID of this ingester instance.
  //
  // This field is currently NOT used by the ingester but will be soon.
  string ingester_uuid = 9;
}

// Status of a partition that has unpersisted data.

@@ -411,12 +411,12 @@ impl<T> OneOrMore<T> {
    }

    /// Returns the first element.
    pub fn first(&self) -> &T {
    pub fn head(&self) -> &T {
        self.contents.first().unwrap()
    }

    /// Returns any remaining elements.
    pub fn rest(&self) -> &[T] {
    /// Returns the remaining elements after [Self::head].
    pub fn tail(&self) -> &[T] {
        &self.contents[1..]
    }

@@ -454,10 +454,75 @@ impl<T: Parser> OneOrMore<T> {
    }
}

/// `ZeroOrMore` is a container for representing zero or more elements of type `T`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ZeroOrMore<T> {
    pub(crate) contents: Vec<T>,
}

impl<T> ZeroOrMore<T> {
    /// Construct a new `ZeroOrMore<T>` with `contents`.
    pub fn new(contents: Vec<T>) -> Self {
        Self { contents }
    }

    /// Returns the first element or `None` if the container is empty.
    pub fn head(&self) -> Option<&T> {
        self.contents.first()
    }

    /// Returns the remaining elements after [Self::head].
    pub fn tail(&self) -> &[T] {
        if self.contents.len() < 2 {
            &[]
        } else {
            &self.contents[1..]
        }
    }

    /// Returns the total number of elements in the container.
    pub fn len(&self) -> usize {
        self.contents.len()
    }

    /// Returns true if the container has no elements.
    pub fn is_empty(&self) -> bool {
        self.contents.is_empty()
    }
}

impl<T> Deref for ZeroOrMore<T> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        &self.contents
    }
}

impl<T: Parser> ZeroOrMore<T> {
    /// Parse a list of one or more `T`, separated by commas.
    ///
    /// Returns an error using `msg` if `separated_list1` fails to parse any elements.
    pub(crate) fn separated_list1<'a>(
        msg: &'static str,
    ) -> impl FnMut(&'a str) -> ParseResult<&'a str, Self> {
        move |i: &str| {
            map(
                expect(
                    msg,
                    separated_list1(preceded(ws0, char(',')), preceded(ws0, T::parse)),
                ),
                Self::new,
            )(i)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{assert_error, assert_expect_error};
    use assert_matches::assert_matches;
    use nom::character::complete::alphanumeric1;

    impl From<&str> for MeasurementName {

@@ -755,8 +820,8 @@ mod tests {

    impl Display for OneOrMoreString {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            Display::fmt(self.first(), f)?;
            for arg in self.rest() {
            Display::fmt(self.head(), f)?;
            for arg in self.tail() {
                write!(f, ", {}", arg)?;
            }
            Ok(())

@@ -768,15 +833,15 @@ mod tests {
    fn test_one_or_more() {
        let (_, got) = OneOrMoreString::separated_list1("Expects one or more")("foo").unwrap();
        assert_eq!(got.len(), 1);
        assert_eq!(got.first(), "foo");
        assert_eq!(got.head(), "foo");
        assert_eq!(*got, vec!["foo"]); // deref
        assert_eq!(format!("{}", got), "foo");

        let (_, got) =
            OneOrMoreString::separated_list1("Expects one or more")("foo , bar,foobar").unwrap();
        assert_eq!(got.len(), 3);
        assert_eq!(got.first(), "foo");
        assert_eq!(got.rest(), vec!["bar", "foobar"]);
        assert_eq!(got.head(), "foo");
        assert_eq!(got.tail(), vec!["bar", "foobar"]);
        assert_eq!(*got, vec!["foo", "bar", "foobar"]); // deref
        assert_eq!(format!("{}", got), "foo, bar, foobar");

@@ -791,6 +856,51 @@ mod tests {
        OneOrMoreString::new(vec![]);
    }

    type ZeroOrMoreString = ZeroOrMore<String>;

    impl Display for ZeroOrMoreString {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            if let Some(first) = self.head() {
                Display::fmt(first, f)?;
                for arg in self.tail() {
                    write!(f, ", {}", arg)?;
                }
            }

            Ok(())
        }
    }

    #[test]
    fn test_zero_or_more() {
        let (_, got) = ZeroOrMoreString::separated_list1("Expects one or more")("foo").unwrap();
        assert_eq!(got.len(), 1);
        assert_eq!(got.head().unwrap(), "foo");
        assert_eq!(*got, vec!["foo"]); // deref
        assert_eq!(format!("{}", got), "foo");

        let (_, got) =
            ZeroOrMoreString::separated_list1("Expects one or more")("foo , bar,foobar").unwrap();
        assert_eq!(got.len(), 3);
        assert_eq!(got.head().unwrap(), "foo");
        assert_eq!(got.tail(), vec!["bar", "foobar"]);
        assert_eq!(*got, vec!["foo", "bar", "foobar"]); // deref
        assert_eq!(format!("{}", got), "foo, bar, foobar");

        // should not panic
        let got = ZeroOrMoreString::new(vec![]);
        assert!(got.is_empty());
        assert_matches!(got.head(), None);
        assert_eq!(got.tail().len(), 0);

        // Fallible cases

        assert_expect_error!(
            OneOrMoreString::separated_list1("Expects one or more")("+"),
            "Expects one or more"
        );
    }

    #[test]
    fn test_comment_single_line() {
        // Comment to EOF

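The `OneOrMore`/`ZeroOrMore` pairing above is the heart of this change: `head()` can stay infallible only while emptiness is impossible, so the zero-tolerant container returns `Option<&T>` and guards its `tail()` slice. A minimal standalone sketch of those accessor semantics (simplified structs for illustration; the real types also implement `Deref` and the nom-backed `separated_list1` constructor shown above):

// Simplified mirrors of the two containers above, for illustration only.
struct OneOrMore<T> {
    contents: Vec<T>, // invariant upheld at construction: never empty
}

struct ZeroOrMore<T> {
    contents: Vec<T>, // may be empty
}

impl<T> OneOrMore<T> {
    fn head(&self) -> &T {
        // unwrap is safe: the constructor rejects empty input.
        self.contents.first().unwrap()
    }
    fn tail(&self) -> &[T] {
        // len >= 1 always holds, so the range 1.. never goes out of bounds.
        &self.contents[1..]
    }
}

impl<T> ZeroOrMore<T> {
    fn head(&self) -> Option<&T> {
        self.contents.first() // None when empty, instead of panicking
    }
    fn tail(&self) -> &[T] {
        // Guard the slice: an empty container yields &[] rather than
        // panicking on an out-of-bounds range.
        if self.contents.len() < 2 {
            &[]
        } else {
            &self.contents[1..]
        }
    }
}

fn main() {
    let zero: ZeroOrMore<i32> = ZeroOrMore { contents: vec![] };
    assert!(zero.head().is_none());
    assert!(zero.tail().is_empty());

    let one = OneOrMore { contents: vec![1, 2, 3] };
    assert_eq!(*one.head(), 1);
    assert_eq!(one.tail(), &[2, 3]);
}

This is why the `Display` implementations later in the diff switch from an unconditional `self.first()` to `if let Some(first) = self.head()`: formatting an empty `FROM`, `GROUP BY`, or field list must simply print nothing.
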
@@ -4,8 +4,8 @@

use crate::common::{
    limit_clause, offset_clause, order_by_clause, qualified_measurement_name, where_clause, ws0,
    ws1, LimitClause, OffsetClause, OneOrMore, OrderByClause, Parser, QualifiedMeasurementName,
    WhereClause,
    ws1, LimitClause, OffsetClause, OrderByClause, Parser, QualifiedMeasurementName, WhereClause,
    ZeroOrMore,
};
use crate::expression::arithmetic::Expr::Wildcard;
use crate::expression::arithmetic::{

@@ -202,14 +202,17 @@ impl Parser for MeasurementSelection {
}

/// Represents a `FROM` clause for a `SELECT` statement.
pub type FromMeasurementClause = OneOrMore<MeasurementSelection>;
pub type FromMeasurementClause = ZeroOrMore<MeasurementSelection>;

impl Display for FromMeasurementClause {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "FROM {}", self.first())?;
        for arg in self.rest() {
            write!(f, ", {}", arg)?;
        if let Some(first) = self.head() {
            write!(f, "FROM {}", first)?;
            for arg in self.tail() {
                write!(f, ", {}", arg)?;
            }
        }

        Ok(())
    }
}

@@ -224,14 +227,17 @@ fn from_clause(i: &str) -> ParseResult<&str, FromMeasurementClause> {
}

/// Represents the collection of dimensions for a `GROUP BY` clause.
pub type GroupByClause = OneOrMore<Dimension>;
pub type GroupByClause = ZeroOrMore<Dimension>;

impl Display for GroupByClause {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "GROUP BY {}", self.first())?;
        for arg in self.rest() {
            write!(f, ", {}", arg)?;
        if let Some(first) = self.head() {
            write!(f, "GROUP BY {}", first)?;
            for arg in self.tail() {
                write!(f, ", {}", arg)?;
            }
        }

        Ok(())
    }
}

@@ -479,14 +485,17 @@ fn wildcard(i: &str) -> ParseResult<&str, Option<WildcardType>> {
}

/// Represents the field projection list of a `SELECT` statement.
pub type FieldList = OneOrMore<Field>;
pub type FieldList = ZeroOrMore<Field>;

impl Display for FieldList {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(self.first(), f)?;
        for arg in self.rest() {
            write!(f, ", {}", arg)?;
        if let Some(first) = self.head() {
            Display::fmt(first, f)?;
            for arg in self.tail() {
                write!(f, ", {}", arg)?;
            }
        }

        Ok(())
    }
}

@@ -980,6 +989,11 @@ mod test {
            Field::new_alias(var_ref!("bar"), "foobar".into())
        ])
    );

    // Fallible cases

    // Unable to parse any valid fields
    assert_expect_error!(field_list("."), "invalid SELECT statement, expected field");
}

#[test]

@@ -1022,6 +1036,10 @@ mod test {

    // Fallible cases

    assert_expect_error!(
        from_clause("FROM"),
        "invalid FROM clause, expected identifier, regular expression or subquery"
    );
    assert_expect_error!(
        from_clause("FROM 1"),
        "invalid FROM clause, expected identifier, regular expression or subquery"

@@ -1085,6 +1103,11 @@ mod test {
        group_by_clause("GROUP time(5m)"),
        "invalid GROUP BY clause, expected BY"
    );

    assert_expect_error!(
        group_by_clause("GROUP BY 1"),
        "invalid GROUP BY clause, expected wildcard, TIME, identifier or regular expression"
    );
}

#[test]

@@ -120,8 +120,8 @@ pub type InList = OneOrMore<Identifier>;

impl Display for InList {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(self.first(), f)?;
        for arg in self.rest() {
        Display::fmt(self.head(), f)?;
        for arg in self.tail() {
            write!(f, ", {}", arg)?;
        }
        Ok(())

@@ -68,8 +68,8 @@ pub type ShowFromClause = FromMeasurementClause<QualifiedMeasurementName>;

impl Display for ShowFromClause {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "FROM {}", self.first())?;
        for arg in self.rest() {
        write!(f, "FROM {}", self.head())?;
        for arg in self.tail() {
            write!(f, ", {}", arg)?;
        }
        Ok(())

@@ -92,8 +92,8 @@ pub type DeleteFromClause = FromMeasurementClause<MeasurementName>;

impl Display for DeleteFromClause {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "FROM {}", self.first())?;
        for arg in self.rest() {
        write!(f, "FROM {}", self.head())?;
        for arg in self.tail() {
            write!(f, ", {}", arg)?;
        }
        Ok(())

@@ -2,24 +2,24 @@
source: influxdb_influxql_parser/src/visit.rs
expression: "visit_statement!(\"EXPLAIN SELECT * FROM cpu\")"
---
- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }"
- "pre_visit_expr: Wildcard(None)"
- "post_visit_expr: Wildcard(None)"
- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"

@@ -2,22 +2,22 @@
source: influxdb_influxql_parser/src/visit.rs
expression: "visit_statement!(r#\"SELECT DISTINCT value FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }"
- "pre_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@@ -2,24 +2,24 @@
source: influxdb_influxql_parser/src/visit.rs
expression: "visit_statement!(r#\"SELECT COUNT(value) FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }"
- "pre_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }"
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }"
- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@@ -2,24 +2,24 @@
source: influxdb_influxql_parser/src/visit.rs
expression: "visit_statement!(r#\"SELECT COUNT(DISTINCT value) FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }"
- "pre_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }"
- "pre_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }"
- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@ -2,15 +2,15 @@
|
|||
source: influxdb_influxql_parser/src/visit.rs
|
||||
expression: "visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)"
|
||||
---
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }"
|
||||
- "pre_visit_expr: Wildcard(None)"
|
||||
- "post_visit_expr: Wildcard(None)"
|
||||
- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }"
|
||||
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
|
||||
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
|
||||
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
|
||||
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }"
- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))"

@ -23,7 +23,7 @@ expression: "visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)"
- "post_visit_measurement_name: Name(Identifier(\"memory\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@ -1,32 +1,32 @@
---
source: influxdb_influxql_parser/src/visit.rs
expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)"
expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }"
- "pre_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }"
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })"
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }"
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"

@ -39,9 +39,9 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })"
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }"
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "pre_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })"
- "pre_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }"
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }"

@ -66,14 +66,14 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }"
- "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }"
- "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })"
- "pre_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "pre_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }"
- "pre_visit_expr: Literal(Duration(Duration(300000000000)))"
- "post_visit_expr: Literal(Duration(Duration(300000000000)))"
- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }"
- "pre_visit_select_dimension: Tag(Identifier(\"host\"))"
- "post_visit_select_dimension: Tag(Identifier(\"host\"))"
- "post_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "post_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "pre_visit_fill_clause: Previous"
- "post_visit_fill_clause: Previous"
- "pre_visit_order_by_clause: Descending"

@ -88,6 +88,6 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE
- "post_visit_soffset_clause: SOffsetClause(4)"
- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")"
- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"

@ -2,22 +2,22 @@
source: influxdb_influxql_parser/src/visit.rs
expression: "visit_statement!(r#\"SELECT value FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@ -2,24 +2,24 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(\"EXPLAIN SELECT * FROM cpu\")"
---
- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }"
- "pre_visit_expr: Wildcard(None)"
- "post_visit_expr: Wildcard(None)"
- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }"
- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })"

@ -2,22 +2,22 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(r#\"SELECT DISTINCT value FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }"
- "pre_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@ -2,24 +2,24 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(r#\"SELECT COUNT(value) FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }"
- "pre_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }"
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }"
- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
|
||||
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
|
||||
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
|
||||
|
||||
|
|
|
@@ -2,24 +2,24 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(r#\"SELECT COUNT(DISTINCT value) FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }"
- "pre_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }"
- "pre_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_expr: Distinct(Identifier(\"value\"))"
- "post_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }"
- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

|
@@ -2,15 +2,15 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }"
- "pre_visit_expr: Wildcard(None)"
- "post_visit_expr: Wildcard(None)"
- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }"
- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))"
@@ -23,7 +23,7 @@ expression: "visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)"
- "post_visit_measurement_name: Name(Identifier(\"memory\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

@@ -2,31 +2,31 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }"
- "pre_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }"
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_measurement_name: Name(Identifier(\"cpu\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }"
- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })"
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }"
- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })"

@@ -39,9 +39,9 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE
- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })"
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }"
- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }"
- "pre_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })"
- "pre_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }"
- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }"

@@ -66,14 +66,14 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE
- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }"
- "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }"
- "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })"
- "pre_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "pre_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }"
- "pre_visit_expr: Literal(Duration(Duration(300000000000)))"
- "post_visit_expr: Literal(Duration(Duration(300000000000)))"
- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }"
- "pre_visit_select_dimension: Tag(Identifier(\"host\"))"
- "post_visit_select_dimension: Tag(Identifier(\"host\"))"
- "post_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "post_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }"
- "pre_visit_fill_clause: Previous"
- "post_visit_fill_clause: Previous"
- "pre_visit_order_by_clause: Descending"
@@ -88,6 +88,6 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE
- "post_visit_soffset_clause: SOffsetClause(4)"
- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")"
- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })"

@@ -2,22 +2,22 @@
source: influxdb_influxql_parser/src/visit_mut.rs
expression: "visit_statement!(r#\"SELECT value FROM temp\"#)"
---
- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }"
- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }"
- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }"
- "pre_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "pre_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_measurement_name: Name(Identifier(\"temp\"))"
- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }"
- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })"
- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"
- "post_visit_select_from_clause: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }"
- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }"
- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })"

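All of the snapshot diffs above record the same two things: the parser's list containers changing from OneOrMore to ZeroOrMore, and the order in which the mutable visitor fires its pre_visit_*/post_visit_* callbacks, with a node's children visited between its pre and post calls. As a rough, self-contained sketch of that callback ordering — the simplified Expr type, VisitorMut trait, and walk function below are illustrative assumptions, not the crate's actual API:

// Sketch only: simplified AST and visitor, mirroring the trace format above.
#[derive(Debug)]
enum Expr {
    VarRef(String),
    Call { name: String, args: Vec<Expr> },
}

trait VisitorMut {
    fn pre_visit_expr(&mut self, e: &mut Expr);
    fn post_visit_expr(&mut self, e: &mut Expr);
}

// Children are walked between the pre and post callbacks, which produces
// the nested pre/post ordering visible in the snapshots.
fn walk_expr<V: VisitorMut>(v: &mut V, e: &mut Expr) {
    v.pre_visit_expr(e);
    if let Expr::Call { args, .. } = e {
        for a in args.iter_mut() {
            walk_expr(v, a);
        }
    }
    v.post_visit_expr(e);
}

struct Tracer(Vec<String>);

impl VisitorMut for Tracer {
    fn pre_visit_expr(&mut self, e: &mut Expr) {
        self.0.push(format!("pre_visit_expr: {e:?}"));
    }
    fn post_visit_expr(&mut self, e: &mut Expr) {
        self.0.push(format!("post_visit_expr: {e:?}"));
    }
}

fn main() {
    let mut expr = Expr::Call {
        name: "COUNT".to_string(),
        args: vec![Expr::VarRef("value".to_string())],
    };
    let mut tracer = Tracer(Vec::new());
    walk_expr(&mut tracer, &mut expr);
    // Prints pre/post lines nested like the COUNT(value) traces above.
    for line in tracer.0 {
        println!("{line}");
    }
}
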
@@ -109,4 +109,4 @@ clippy = []

# Temporary feature to use the RPC write path instead of the write buffer during the transition
# away from using Kafka.
rpc_write = ["ioxd_router/rpc_write", "clap_blocks/rpc_write"]
rpc_write = ["ioxd_router/rpc_write", "clap_blocks/rpc_write", "test_helpers_end_to_end/rpc_write"]

@@ -440,11 +440,9 @@ impl Config {
        };

        let querier_config = QuerierConfig {
            num_query_threads: None, // will be ignored
            #[cfg(not(feature = "rpc_write"))]
            num_query_threads: None, // will be ignored
            shard_to_ingesters_file: None, // will be ignored
            #[cfg(not(feature = "rpc_write"))]
            shard_to_ingesters: None, // will be ignored
            shard_to_ingesters: None, // will be ignored
            #[cfg(feature = "rpc_write")]
            ingester_addresses: vec![], // will be ignored
            ram_pool_metadata_bytes: querier_ram_pool_metadata_bytes,

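The cfg-gated initializers above mean the same QuerierConfig literal compiles to different field sets depending on whether the rpc_write Cargo feature is active (for example via cargo test --features rpc_write). A minimal sketch of the pattern, with hypothetical field names standing in for the real ones:

// Sketch of conditional struct fields; `rpc_write` is assumed to be
// declared under [features] in Cargo.toml.
struct QuerierConfig {
    #[cfg(not(feature = "rpc_write"))]
    shard_to_ingesters: Option<String>,
    #[cfg(feature = "rpc_write")]
    ingester_addresses: Vec<String>,
}

fn config() -> QuerierConfig {
    QuerierConfig {
        // Only the fields that exist for the active feature set may be named.
        #[cfg(not(feature = "rpc_write"))]
        shard_to_ingesters: None, // write-buffer path
        #[cfg(feature = "rpc_write")]
        ingester_addresses: vec![], // RPC write path
    }
}
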
@@ -53,7 +53,8 @@ async fn ingester_flight_api() {
                partition_id,
                status: Some(PartitionStatus {
                    parquet_max_sequence_number: None,
                })
                }),
                ingester_uuid: String::new(),
            },
        );

@@ -87,6 +88,93 @@ async fn ingester_flight_api() {
    });
}

#[cfg(feature = "rpc_write")]
#[tokio::test]
async fn ingester2_flight_api() {
    test_helpers::maybe_start_logging();
    let database_url = maybe_skip_integration!();

    let table_name = "mytable";

    // Set up cluster
    let mut cluster = MiniCluster::create_non_shared_rpc_write(database_url).await;

    // Write some data into the v2 HTTP API ==============
    let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name);
    let response = cluster.write_to_router(lp).await;
    assert_eq!(response.status(), StatusCode::NO_CONTENT);

    let mut querier_flight = influxdb_iox_client::flight::low_level::Client::<
        influxdb_iox_client::flight::generated_types::IngesterQueryRequest,
    >::new(cluster.ingester().ingester_grpc_connection(), None);

    let query = IngesterQueryRequest::new(
        cluster.namespace_id().await,
        cluster.table_id(table_name).await,
        vec![],
        Some(::predicate::EMPTY_PREDICATE),
    );

    let mut performed_query = querier_flight
        .perform_query(query.clone().try_into().unwrap())
        .await
        .unwrap();

    let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap();
    msg.unwrap_none();

    let ingester_uuid = app_metadata.ingester_uuid.clone();
    assert!(!ingester_uuid.is_empty());

    let (msg, _) = performed_query.next().await.unwrap().unwrap();
    let schema = msg.unwrap_schema();

    let mut query_results = vec![];
    while let Some((msg, _md)) = performed_query.next().await.unwrap() {
        let batch = msg.unwrap_record_batch();
        query_results.push(batch);
    }

    let expected = [
        "+------+------+--------------------------------+-----+",
        "| tag1 | tag2 | time                           | val |",
        "+------+------+--------------------------------+-----+",
        "| A    | B    | 1970-01-01T00:00:00.000123456Z | 42  |",
        "+------+------+--------------------------------+-----+",
    ];
    assert_batches_sorted_eq!(&expected, &query_results);

    // Also ensure that the schema of the batches matches what is
    // reported by the performed_query.
    query_results.iter().enumerate().for_each(|(i, b)| {
        assert_eq!(
            schema,
            b.schema(),
            "Schema mismatch for returned batch {}",
            i
        );
    });

    // Ensure the ingester UUID is the same in the next query
    let mut performed_query = querier_flight
        .perform_query(query.clone().try_into().unwrap())
        .await
        .unwrap();
    let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap();
    msg.unwrap_none();
    assert_eq!(app_metadata.ingester_uuid, ingester_uuid);

    // Restart the ingester and ensure it gets a new UUID
    cluster.restart_ingester().await;
    let mut performed_query = querier_flight
        .perform_query(query.try_into().unwrap())
        .await
        .unwrap();
    let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap();
    msg.unwrap_none();
    assert_ne!(app_metadata.ingester_uuid, ingester_uuid);
}

#[tokio::test]
async fn ingester_flight_api_namespace_not_found() {
    test_helpers::maybe_start_logging();

@@ -55,6 +55,6 @@ mutable_batch_pb = { version = "0.1.0", path = "../mutable_batch_pb" }
[dev-dependencies]
assert_matches = "1.5.0"
lazy_static = "1.4.0"
paste = "1.0.9"
paste = "1.0.10"
test_helpers = { path = "../test_helpers", features = ["future_timeout"] }
tokio-stream = {version = "0.1.11", default_features = false }

@@ -323,6 +323,8 @@ impl Stream for GetStream {
                        .parquet_max_sequence_number
                        .map(|x| x.get()),
                }),
                // This is only used in ingester2.
                ingester_uuid: String::new(),
            };
            prost::Message::encode(&app_metadata, &mut bytes)
                .context(SerializationSnafu)?;
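On the receiving side, the same proto message is decoded back out of the app_metadata bytes to read fields such as the new ingester_uuid. A sketch of the decode direction, kept generic because the exact message type path is an assumption here:

use prost::Message;

// Sketch: decode app_metadata bytes back into a prost message; in this
// crate `M` would be the ingester's query-response metadata proto type.
fn decode_app_metadata<M: Message + Default>(bytes: &[u8]) -> Result<M, prost::DecodeError> {
    M::decode(bytes)
}
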
@@ -412,6 +414,8 @@ mod tests {
                        status: Some(proto::PartitionStatus {
                            parquet_max_sequence_number: None,
                        }),
                        // This is only used in ingester2.
                        ingester_uuid: String::new(),
                    },
                }),
                Ok(DecodedFlightData {
@@ -453,6 +457,8 @@ mod tests {
                        status: Some(proto::PartitionStatus {
                            parquet_max_sequence_number: None,
                        }),
                        // This is only used in ingester2.
                        ingester_uuid: String::new(),
                    },
                }),
                Err(tonic::Code::Internal),

@@ -53,7 +53,7 @@ criterion = { version = "0.4", default-features = false, features = ["async_toki
datafusion_util = { path = "../datafusion_util" }
lazy_static = "1.4.0"
mutable_batch_lp = { path = "../mutable_batch_lp" }
paste = "1.0.9"
paste = "1.0.10"
tempfile = "3.3.0"
test_helpers = { path = "../test_helpers", features = ["future_timeout"] }

@ -27,7 +27,7 @@ use crate::{
    server::grpc::GrpcDelegate,
    timestamp_oracle::TimestampOracle,
    wal::{rotate_task::periodic_rotation, wal_sink::WalSink},
    TRANSITION_SHARD_ID,
    TRANSITION_SHARD_ID, TRANSITION_SHARD_INDEX,
};

/// Acquire opaque handles to the Ingester RPC service implementations.

@ -159,6 +159,25 @@ pub async fn new(
    persist_worker_queue_depth: usize,
    object_store: ParquetStorage,
) -> Result<IngesterGuard<impl IngesterRpcInterface>, InitError> {
    // Create the transition shard.
    let mut txn = catalog
        .start_transaction()
        .await
        .expect("start transaction");
    let topic = txn
        .topics()
        .get_by_name("iox-shared")
        .await
        .expect("get topic")
        .unwrap();
    let s = txn
        .shards()
        .create_or_get(&topic, TRANSITION_SHARD_INDEX)
        .await
        .expect("create transition shard");
    assert_eq!(s.id, TRANSITION_SHARD_ID);
    txn.commit().await.expect("commit transition shard");

    // Initialise the deferred namespace name resolver.
    let namespace_name_provider: Arc<dyn NamespaceNameProvider> =
        Arc::new(NamespaceNameResolver::new(
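This bootstrap block relies on `create_or_get` being idempotent: every ingester start replays it, but only the first run actually inserts the shard row, and the `assert_eq!` pins the well-known transition shard ID. A standalone sketch of that contract, with a hypothetical in-memory stand-in for the catalog's shard repository:

    use std::collections::HashMap;

    // Hypothetical in-memory stand-in for the catalog's shard repository.
    struct Shards {
        next_id: i64,
        by_index: HashMap<i32, i64>,
    }

    impl Shards {
        // Insert a row for `index` at most once; later calls return the same ID.
        fn create_or_get(&mut self, index: i32) -> i64 {
            if let Some(id) = self.by_index.get(&index) {
                return *id;
            }
            self.next_id += 1;
            self.by_index.insert(index, self.next_id);
            self.next_id
        }
    }

    fn main() {
        let mut shards = Shards { next_id: 0, by_index: HashMap::new() };
        let first = shards.create_or_get(1234);
        // A second "ingester start" sees the same shard ID.
        assert_eq!(shards.create_or_get(1234), first);
    }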
@ -11,7 +11,7 @@ use observability_deps::tracing::*;
use parking_lot::Mutex;
use parquet_file::metadata::IoxMetadata;
use schema::sort::SortKey;
use tokio::sync::Notify;
use tokio::{sync::Notify, time::Instant};
use uuid::Uuid;

use crate::{

@ -35,14 +35,18 @@ pub(super) struct PersistRequest {
    complete: Arc<Notify>,
    partition: Arc<Mutex<PartitionData>>,
    data: PersistingData,
    enqueued_at: Instant,
}

impl PersistRequest {
    /// Construct a [`PersistRequest`] for `data` from `partition`, recording
    /// the current timestamp as the "enqueued at" point.
    pub(super) fn new(partition: Arc<Mutex<PartitionData>>, data: PersistingData) -> Self {
        Self {
            complete: Arc::new(Notify::default()),
            partition,
            data,
            enqueued_at: Instant::now(),
        }
    }

@ -63,7 +67,6 @@ pub(super) struct Context {
    partition: Arc<Mutex<PartitionData>>,
    data: PersistingData,
    inner: Arc<Inner>,

    /// IDs loaded from the partition at construction time.
    namespace_id: NamespaceId,
    table_id: TableId,

@ -93,6 +96,13 @@ pub(super) struct Context {
    /// A notification signal to indicate to the caller that this partition has
    /// persisted.
    complete: Arc<Notify>,

    /// Timing statistics tracking the timestamp this persist job was first
    /// enqueued, and the timestamp this [`Context`] was constructed (signifying
    /// the start of active persist work, as opposed to passive time spent in
    /// the queue).
    enqueued_at: Instant,
    dequeued_at: Instant,
}

impl Context {

@ -128,6 +138,8 @@ impl Context {
                sort_key: guard.sort_key().clone(),

                complete,
                enqueued_at: req.enqueued_at,
                dequeued_at: Instant::now(),
            }
        };

@ -364,6 +376,8 @@ impl Context {
        // the persisted data will be dropped "shortly".
        self.partition.lock().mark_persisted(self.data);

        let now = Instant::now();

        info!(
            %object_store_id,
            namespace_id = %self.namespace_id,

@ -372,6 +386,9 @@ impl Context {
            table_name = %self.table_name,
            partition_id = %self.partition_id,
            partition_key = %self.partition_key,
            total_persist_duration = ?now.duration_since(self.enqueued_at),
            active_persist_duration = ?now.duration_since(self.dequeued_at),
            queued_persist_duration = ?self.dequeued_at.duration_since(self.enqueued_at),
            "persisted partition"
        );
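The three durations logged above always satisfy total = queued + active, since they derive from the same two recorded instants. A minimal sketch of the bookkeeping, assuming plain `std::time` in place of tokio's `Instant` re-export:

    use std::time::{Duration, Instant};

    // Mirrors the enqueued_at/dequeued_at pair carried by the persist Context.
    struct PersistTimings {
        enqueued_at: Instant,
        dequeued_at: Instant,
    }

    impl PersistTimings {
        // Split the wall-clock time since enqueue into queue wait and work.
        fn report(&self, now: Instant) -> (Duration, Duration, Duration) {
            let total = now.duration_since(self.enqueued_at);
            let queued = self.dequeued_at.duration_since(self.enqueued_at);
            let active = now.duration_since(self.dequeued_at);
            (total, queued, active)
        }
    }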
@ -2,7 +2,7 @@ use std::sync::Arc;

use iox_catalog::interface::Catalog;
use iox_query::exec::Executor;
use observability_deps::tracing::info;
use observability_deps::tracing::{debug, info};
use parking_lot::Mutex;
use parquet_file::storage::ParquetStorage;
use thiserror::Error;

@ -169,6 +169,11 @@ impl PersistHandle {
        partition: Arc<Mutex<PartitionData>>,
        data: PersistingData,
    ) -> Arc<Notify> {
        debug!(
            partition_id = data.partition_id().get(),
            "enqueuing persistence task"
        );

        // Build the persist task request
        let r = PersistRequest::new(partition, data);
        let notify = r.complete_notification();
@ -21,6 +21,7 @@ use thiserror::Error;
use tokio::sync::{Semaphore, TryAcquireError};
use tonic::{Request, Response, Streaming};
use trace::{ctx::SpanContext, span::SpanExt};
use uuid::Uuid;

use crate::query::{response::QueryResponse, QueryError, QueryExec};

@ -107,6 +108,8 @@ pub(crate) struct FlightService<Q> {
    /// Number of queries rejected due to lack of available `request_sem`
    /// permit.
    query_request_limit_rejected: U64Counter,

    ingester_uuid: Uuid,
}

impl<Q> FlightService<Q> {

@ -126,6 +129,7 @@ impl<Q> FlightService<Q> {
            query_handler,
            request_sem: Semaphore::new(max_simultaneous_requests),
            query_request_limit_rejected,
            ingester_uuid: Uuid::new_v4(),
        }
    }
}

@ -197,7 +201,10 @@ where
            )
            .await?;

        let output = FlightFrameCodec::new(FlatIngesterQueryResponseStream::from(response));
        let output = FlightFrameCodec::new(
            FlatIngesterQueryResponseStream::from(response),
            self.ingester_uuid,
        );

        Ok(Response::new(Box::pin(output) as Self::DoGetStream))
    }

@ -350,14 +357,16 @@ struct FlightFrameCodec {
    inner: Pin<Box<dyn Stream<Item = Result<FlatIngesterQueryResponse, ArrowError>> + Send>>,
    done: bool,
    buffer: Vec<FlightData>,
    ingester_uuid: Uuid,
}

impl FlightFrameCodec {
    fn new(inner: FlatIngesterQueryResponseStream) -> Self {
    fn new(inner: FlatIngesterQueryResponseStream, ingester_uuid: Uuid) -> Self {
        Self {
            inner,
            done: false,
            buffer: vec![],
            ingester_uuid,
        }
    }
}

@ -400,6 +409,7 @@ impl Stream for FlightFrameCodec {
                    status: Some(proto::PartitionStatus {
                        parquet_max_sequence_number: status.parquet_max_sequence_number,
                    }),
                    ingester_uuid: this.ingester_uuid.to_string(),
                };
                prost::Message::encode(&app_metadata, &mut bytes).map_err(Error::from)?;

@ -460,7 +470,7 @@ mod tests {

    #[tokio::test]
    async fn test_get_stream_empty() {
        assert_get_stream(vec![], vec![]).await;
        assert_get_stream(Uuid::new_v4(), vec![], vec![]).await;
    }

    #[tokio::test]

@ -470,8 +480,10 @@ mod tests {
            .to_arrow(Projection::All)
            .unwrap();
        let schema = batch.schema();
        let ingester_uuid = Uuid::new_v4();

        assert_get_stream(
            ingester_uuid,
            vec![
                Ok(FlatIngesterQueryResponse::StartPartition {
                    partition_id: PartitionId::new(1),

@ -490,6 +502,7 @@ mod tests {
                    status: Some(proto::PartitionStatus {
                        parquet_max_sequence_number: None,
                    }),
                    ingester_uuid: ingester_uuid.to_string(),
                },
            }),
            Ok(DecodedFlightData {

@ -507,7 +520,9 @@ mod tests {

    #[tokio::test]
    async fn test_get_stream_shortcuts_err() {
        let ingester_uuid = Uuid::new_v4();
        assert_get_stream(
            ingester_uuid,
            vec![
                Ok(FlatIngesterQueryResponse::StartPartition {
                    partition_id: PartitionId::new(1),

@ -531,6 +546,7 @@ mod tests {
                    status: Some(proto::PartitionStatus {
                        parquet_max_sequence_number: None,
                    }),
                    ingester_uuid: ingester_uuid.to_string(),
                },
            }),
            Err(tonic::Code::Internal),

@ -547,6 +563,7 @@ mod tests {
            .unwrap();

        assert_get_stream(
            Uuid::new_v4(),
            vec![Ok(FlatIngesterQueryResponse::RecordBatch { batch })],
            vec![
                Ok(DecodedFlightData {

@ -572,11 +589,12 @@ mod tests {
    }

    async fn assert_get_stream(
        ingester_uuid: Uuid,
        inputs: Vec<Result<FlatIngesterQueryResponse, ArrowError>>,
        expected: Vec<Result<DecodedFlightData, tonic::Code>>,
    ) {
        let inner = Box::pin(futures::stream::iter(inputs));
        let stream = FlightFrameCodec::new(inner);
        let stream = FlightFrameCodec::new(inner, ingester_uuid);
        let actual: Vec<_> = stream.collect().await;
        assert_eq!(actual.len(), expected.len());
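Note the UUID is generated once in `FlightService::new` and copied into every `FlightFrameCodec`, so all streams produced by one process report the same value until restart. A small sketch of that property, assuming only the `uuid` crate:

    use uuid::Uuid;

    struct Service {
        // Generated once at construction, as FlightService::new does above.
        ingester_uuid: Uuid,
    }

    impl Service {
        fn new() -> Self {
            Self { ingester_uuid: Uuid::new_v4() }
        }

        // Every response stream stamps the same process-lifetime UUID.
        fn stamp(&self) -> String {
            self.ingester_uuid.to_string()
        }
    }

    fn main() {
        let svc = Service::new();
        assert_eq!(svc.stamp(), svc.stamp());
        // A "restarted" service gets a fresh UUID.
        assert_ne!(svc.stamp(), Service::new().stamp());
    }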
@ -1,6 +1,7 @@
use futures::{stream, StreamExt};
use observability_deps::tracing::*;
use std::{future, sync::Arc, time::Duration};
use tokio::time::Instant;

use crate::{buffer_tree::BufferTree, persist::handle::PersistHandle};

@ -90,9 +91,17 @@ pub(crate) async fn periodic_rotation(
        let notifications = stream::iter(buffer.partitions())
            .filter_map(|p| {
                async move {
                    let t = Instant::now();

                    // Skip this partition if there is no data to persist
                    let data = p.lock().mark_persisting()?;

                    debug!(
                        partition_id=data.partition_id().get(),
                        lock_wait=?Instant::now().duration_since(t),
                        "read data for persistence"
                    );

                    // Enqueue the partition for persistence.
                    //
                    // The persist task will call mark_persisted() on the partition
@ -26,7 +26,7 @@ workspace-hack = { path = "../workspace-hack"}
assert_matches = "1.5.0"
dotenvy = "0.15.6"
mutable_batch_lp = { path = "../mutable_batch_lp" }
paste = "1.0.9"
paste = "1.0.10"
pretty_assertions = "1.3.0"
rand = "0.8"
tempfile = "3"
@ -11,14 +11,18 @@ use cache_system::{
    loader::{metrics::MetricsLoader, FunctionLoader},
    resource_consumption::FunctionEstimator,
};
use data_types::{PartitionId, ShardId};
use data_types::{ColumnId, PartitionId, ShardId};
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use schema::sort::SortKey;
use std::{collections::HashMap, mem::size_of_val, sync::Arc};
use std::{
    collections::{HashMap, HashSet},
    mem::{size_of, size_of_val},
    sync::Arc,
};
use trace::span::Span;

use super::ram::RamSize;
use super::{namespace::CachedTable, ram::RamSize};

const CACHE_ID: &str = "partition";

@ -26,7 +30,7 @@ type CacheT = Box<
    dyn Cache<
        K = PartitionId,
        V = CachedPartition,
        GetExtra = ((), Option<Span>),
        GetExtra = (Arc<CachedTable>, Option<Span>),
        PeekExtra = ((), Option<Span>),
    >,
>;

@ -48,30 +52,35 @@ impl PartitionCache {
        ram_pool: Arc<ResourcePool<RamSize>>,
        testing: bool,
    ) -> Self {
        let loader = FunctionLoader::new(move |partition_id: PartitionId, _extra: ()| {
            let catalog = Arc::clone(&catalog);
            let backoff_config = backoff_config.clone();
        let loader =
            FunctionLoader::new(move |partition_id: PartitionId, extra: Arc<CachedTable>| {
                let catalog = Arc::clone(&catalog);
                let backoff_config = backoff_config.clone();

            async move {
                let partition = Backoff::new(&backoff_config)
                    .retry_all_errors("get partition_key", || async {
                        catalog
                            .repositories()
                            .await
                            .partitions()
                            .get_by_id(partition_id)
                            .await
                    })
                    .await
                    .expect("retry forever")
                    .expect("partition gone from catalog?!");
                async move {
                    let partition = Backoff::new(&backoff_config)
                        .retry_all_errors("get partition_key", || async {
                            catalog
                                .repositories()
                                .await
                                .partitions()
                                .get_by_id(partition_id)
                                .await
                        })
                        .await
                        .expect("retry forever")
                        .expect("partition gone from catalog?!");

                CachedPartition {
                    shard_id: partition.shard_id,
                    sort_key: Arc::new(partition.sort_key()),
                    let sort_key = partition.sort_key().map(|sort_key| {
                        Arc::new(PartitionSortKey::new(sort_key, &extra.column_id_map_rev))
                    });

                    CachedPartition {
                        shard_id: partition.shard_id,
                        sort_key,
                    }
                }
            }
        });
            });
        let loader = Arc::new(MetricsLoader::new(
            loader,
            CACHE_ID,

@ -107,8 +116,16 @@ impl PartitionCache {
    }

    /// Get shard ID.
    pub async fn shard_id(&self, partition_id: PartitionId, span: Option<Span>) -> ShardId {
        self.cache.get(partition_id, ((), span)).await.shard_id
    pub async fn shard_id(
        &self,
        cached_table: Arc<CachedTable>,
        partition_id: PartitionId,
        span: Option<Span>,
    ) -> ShardId {
        self.cache
            .get(partition_id, (cached_table, span))
            .await
            .shard_id
    }

    /// Get sort key

@ -116,23 +133,26 @@ impl PartitionCache {
    /// Expire partition if the cached sort key does NOT cover the given set of columns.
    pub async fn sort_key(
        &self,
        cached_table: Arc<CachedTable>,
        partition_id: PartitionId,
        should_cover: &[&str],
        should_cover: &[ColumnId],
        span: Option<Span>,
    ) -> Arc<Option<SortKey>> {
    ) -> Option<Arc<PartitionSortKey>> {
        self.remove_if_handle
            .remove_if_and_get(
                &self.cache,
                partition_id,
                |cached_partition| {
                    if let Some(sort_key) = cached_partition.sort_key.as_ref().as_ref() {
                        should_cover.iter().any(|col| !sort_key.contains(col))
                    if let Some(sort_key) = &cached_partition.sort_key {
                        should_cover
                            .iter()
                            .any(|col| !sort_key.column_set.contains(col))
                    } else {
                        // no sort key at all => need to update if there is anything to cover
                        !should_cover.is_empty()
                    }
                },
                ((), span),
                (cached_table, span),
            )
            .await
            .sort_key
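The closure passed to `remove_if_and_get` is the whole expiry policy: refresh when a requested column is missing from the cached key, or when columns are requested but no key is cached at all. A standalone restatement of that rule, with `i64` standing in for `ColumnId`:

    use std::collections::HashSet;

    // True when the cached entry must be refreshed for this request.
    fn needs_refresh(cached: Option<&HashSet<i64>>, should_cover: &[i64]) -> bool {
        match cached {
            Some(column_set) => should_cover.iter().any(|col| !column_set.contains(col)),
            // No sort key at all => refresh if there is anything to cover.
            None => !should_cover.is_empty(),
        }
    }

    fn main() {
        let cached: HashSet<i64> = [1, 2].into_iter().collect();
        assert!(!needs_refresh(Some(&cached), &[1])); // covered
        assert!(needs_refresh(Some(&cached), &[3]));  // missing column
        assert!(!needs_refresh(None, &[]));           // nothing requested
        assert!(needs_refresh(None, &[1]));           // key absent, column wanted
    }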
@ -142,28 +162,67 @@ impl PartitionCache {
#[derive(Debug, Clone)]
struct CachedPartition {
    shard_id: ShardId,
    sort_key: Arc<Option<SortKey>>,
    sort_key: Option<Arc<PartitionSortKey>>,
}

impl CachedPartition {
    /// RAM-bytes EXCLUDING `self`.
    fn size(&self) -> usize {
        // Arc heap allocation
        size_of_val(self.sort_key.as_ref()) +
        // Arc content
        self.sort_key
            .as_ref()
            .as_ref()
            .map(|sk| sk.size() - size_of_val(sk))
            .map(|sk| sk.size())
            .unwrap_or_default()
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PartitionSortKey {
    pub sort_key: Arc<SortKey>,
    pub column_set: HashSet<ColumnId>,
    pub column_order: Vec<ColumnId>,
}

impl PartitionSortKey {
    fn new(sort_key: SortKey, column_id_map_rev: &HashMap<Arc<str>, ColumnId>) -> Self {
        let sort_key = Arc::new(sort_key);

        let mut column_order: Vec<ColumnId> = sort_key
            .iter()
            .map(|(name, _opts)| {
                *column_id_map_rev
                    .get(name.as_ref())
                    .unwrap_or_else(|| panic!("column_id_map_rev misses data: {name}"))
            })
            .collect();
        column_order.shrink_to_fit();

        let mut column_set: HashSet<ColumnId> = column_order.iter().copied().collect();
        column_set.shrink_to_fit();

        Self {
            sort_key,
            column_set,
            column_order,
        }
    }

    /// Size of this object in bytes, including `self`.
    fn size(&self) -> usize {
        size_of_val(self)
            + self.sort_key.as_ref().size()
            + (self.column_set.capacity() * size_of::<ColumnId>())
            + (self.column_order.capacity() * size_of::<ColumnId>())
    }
}
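`PartitionSortKey::new` translates the sort key's column names into stable `ColumnId`s once, so the cover checks above become integer set lookups instead of string comparisons. A sketch of that translation with plain strings and `i64` IDs standing in for the real types:

    use std::collections::{HashMap, HashSet};

    // Map ordered sort-key column names to IDs via a reverse lookup table,
    // as PartitionSortKey::new does with CachedTable::column_id_map_rev.
    fn to_ids(sort_key: &[&str], rev: &HashMap<&str, i64>) -> (Vec<i64>, HashSet<i64>) {
        let column_order: Vec<i64> = sort_key
            .iter()
            .map(|name| *rev.get(name).expect("reverse map misses a column"))
            .collect();
        let column_set: HashSet<i64> = column_order.iter().copied().collect();
        (column_order, column_set)
    }

    fn main() {
        let rev = HashMap::from([("tag", 1), ("time", 2)]);
        let (order, set) = to_ids(&["tag", "time"], &rev);
        assert_eq!(order, vec![1, 2]); // order preserved for sort-key rebuilds
        assert!(set.contains(&2));     // set used for O(1) cover checks
    }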
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cache::{ram::test_util::test_ram_pool, test_util::assert_histogram_metric_count};
    use data_types::ColumnType;
    use iox_tests::util::TestCatalog;
    use schema::{Schema, SchemaBuilder};

    #[tokio::test]
    async fn test_shard_id() {

@ -185,6 +244,13 @@ mod tests {
            .await
            .partition
            .clone();
        let cached_table = Arc::new(CachedTable {
            id: t.table.id,
            schema: schema(),
            column_id_map: HashMap::default(),
            column_id_map_rev: HashMap::default(),
            primary_key_column_ids: vec![],
        });

        let cache = PartitionCache::new(
            catalog.catalog(),

@ -195,15 +261,15 @@ mod tests {
            true,
        );

        let id1 = cache.shard_id(p1.id, None).await;
        let id1 = cache.shard_id(Arc::clone(&cached_table), p1.id, None).await;
        assert_eq!(id1, s1.shard.id);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 1);

        let id2 = cache.shard_id(p2.id, None).await;
        let id2 = cache.shard_id(Arc::clone(&cached_table), p2.id, None).await;
        assert_eq!(id2, s2.shard.id);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 2);

        let id1 = cache.shard_id(p1.id, None).await;
        let id1 = cache.shard_id(Arc::clone(&cached_table), p1.id, None).await;
        assert_eq!(id1, s1.shard.id);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 2);
    }

@ -214,6 +280,8 @@ mod tests {

        let ns = catalog.create_namespace_1hr_retention("ns").await;
        let t = ns.create_table("table").await;
        let c1 = t.create_column("tag", ColumnType::Tag).await;
        let c2 = t.create_column("time", ColumnType::Time).await;
        let s1 = ns.create_shard(1).await;
        let s2 = ns.create_shard(2).await;
        let p1 = t

@ -228,6 +296,19 @@ mod tests {
            .await
            .partition
            .clone();
        let cached_table = Arc::new(CachedTable {
            id: t.table.id,
            schema: schema(),
            column_id_map: HashMap::from([
                (c1.column.id, Arc::from(c1.column.name.clone())),
                (c2.column.id, Arc::from(c2.column.name.clone())),
            ]),
            column_id_map_rev: HashMap::from([
                (Arc::from(c1.column.name.clone()), c1.column.id),
                (Arc::from(c2.column.name.clone()), c2.column.id),
            ]),
            primary_key_column_ids: vec![c1.column.id, c2.column.id],
        });

        let cache = PartitionCache::new(
            catalog.catalog(),

@ -238,16 +319,32 @@ mod tests {
            true,
        );

        let sort_key1 = cache.sort_key(p1.id, &Vec::new(), None).await;
        assert_eq!(sort_key1.as_ref(), &p1.sort_key());
        let sort_key1a = cache
            .sort_key(Arc::clone(&cached_table), p1.id, &Vec::new(), None)
            .await;
        assert_eq!(
            sort_key1a.as_ref().unwrap().as_ref(),
            &PartitionSortKey {
                sort_key: Arc::new(p1.sort_key().unwrap()),
                column_set: HashSet::from([c1.column.id, c2.column.id]),
                column_order: vec![c1.column.id, c2.column.id],
            }
        );
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 1);

        let sort_key2 = cache.sort_key(p2.id, &Vec::new(), None).await;
        assert_eq!(sort_key2.as_ref(), &p2.sort_key());
        let sort_key2 = cache
            .sort_key(Arc::clone(&cached_table), p2.id, &Vec::new(), None)
            .await;
        assert_eq!(sort_key2, None);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 2);

        let sort_key1 = cache.sort_key(p1.id, &Vec::new(), None).await;
        assert_eq!(sort_key1.as_ref(), &p1.sort_key());
        let sort_key1b = cache
            .sort_key(Arc::clone(&cached_table), p1.id, &Vec::new(), None)
            .await;
        assert!(Arc::ptr_eq(
            sort_key1a.as_ref().unwrap(),
            sort_key1b.as_ref().unwrap()
        ));
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 2);
    }
@ -257,6 +354,8 @@ mod tests {

        let ns = catalog.create_namespace_1hr_retention("ns").await;
        let t = ns.create_table("table").await;
        let c1 = t.create_column("tag", ColumnType::Tag).await;
        let c2 = t.create_column("time", ColumnType::Time).await;
        let s1 = ns.create_shard(1).await;
        let s2 = ns.create_shard(2).await;
        let p1 = t

@ -277,6 +376,19 @@ mod tests {
            .await
            .partition
            .clone();
        let cached_table = Arc::new(CachedTable {
            id: t.table.id,
            schema: schema(),
            column_id_map: HashMap::from([
                (c1.column.id, Arc::from(c1.column.name.clone())),
                (c2.column.id, Arc::from(c2.column.name.clone())),
            ]),
            column_id_map_rev: HashMap::from([
                (Arc::from(c1.column.name.clone()), c1.column.id),
                (Arc::from(c2.column.name.clone()), c2.column.id),
            ]),
            primary_key_column_ids: vec![c1.column.id, c2.column.id],
        });

        let cache = PartitionCache::new(
            catalog.catalog(),

@ -287,16 +399,22 @@ mod tests {
            true,
        );

        cache.shard_id(p2.id, None).await;
        cache.sort_key(p3.id, &Vec::new(), None).await;
        cache.shard_id(Arc::clone(&cached_table), p2.id, None).await;
        cache
            .sort_key(Arc::clone(&cached_table), p3.id, &Vec::new(), None)
            .await;
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 2);

        cache.shard_id(p1.id, None).await;
        cache.sort_key(p2.id, &Vec::new(), None).await;
        cache.shard_id(Arc::clone(&cached_table), p1.id, None).await;
        cache
            .sort_key(Arc::clone(&cached_table), p2.id, &Vec::new(), None)
            .await;
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 3);

        cache.sort_key(p1.id, &Vec::new(), None).await;
        cache.shard_id(p2.id, None).await;
        cache
            .sort_key(Arc::clone(&cached_table), p1.id, &Vec::new(), None)
            .await;
        cache.shard_id(Arc::clone(&cached_table), p2.id, None).await;
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 3);
    }
@ -306,10 +424,25 @@ mod tests {

        let ns = catalog.create_namespace_1hr_retention("ns").await;
        let t = ns.create_table("table").await;
        let c1 = t.create_column("foo", ColumnType::Tag).await;
        let c2 = t.create_column("time", ColumnType::Time).await;
        let s = ns.create_shard(1).await;
        let p = t.with_shard(&s).create_partition("k1").await;
        let p_id = p.partition.id;
        let p_sort_key = p.partition.sort_key();
        let cached_table = Arc::new(CachedTable {
            id: t.table.id,
            schema: schema(),
            column_id_map: HashMap::from([
                (c1.column.id, Arc::from(c1.column.name.clone())),
                (c2.column.id, Arc::from(c2.column.name.clone())),
            ]),
            column_id_map_rev: HashMap::from([
                (Arc::from(c1.column.name.clone()), c1.column.id),
                (Arc::from(c2.column.name.clone()), c2.column.id),
            ]),
            primary_key_column_ids: vec![c1.column.id, c2.column.id],
        });

        let cache = PartitionCache::new(
            catalog.catalog(),

@ -320,42 +453,86 @@ mod tests {
            true,
        );

        let sort_key = cache.sort_key(p_id, &Vec::new(), None).await;
        assert_eq!(sort_key.as_ref(), &p_sort_key);
        let sort_key = cache
            .sort_key(Arc::clone(&cached_table), p_id, &Vec::new(), None)
            .await;
        assert_eq!(sort_key, None);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 1);

        // requesting nothing will not expire
        assert!(p_sort_key.is_none());
        let sort_key = cache.sort_key(p_id, &Vec::new(), None).await;
        assert_eq!(sort_key.as_ref(), &p_sort_key);
        let sort_key = cache
            .sort_key(Arc::clone(&cached_table), p_id, &Vec::new(), None)
            .await;
        assert_eq!(sort_key, None);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 1);

        // but requesting something will expire
        let sort_key = cache.sort_key(p_id, &["foo"], None).await;
        assert_eq!(sort_key.as_ref(), &p_sort_key);
        let sort_key = cache
            .sort_key(Arc::clone(&cached_table), p_id, &[c1.column.id], None)
            .await;
        assert_eq!(sort_key, None);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 2);

        // set sort key
        let p = p
            .update_sort_key(SortKey::from_columns(["foo", "bar"]))
            .update_sort_key(SortKey::from_columns([
                c1.column.name.as_str(),
                c2.column.name.as_str(),
            ]))
            .await;

        // expire & fetch
        let p_sort_key = p.partition.sort_key();
        let sort_key = cache.sort_key(p_id, &["foo"], None).await;
        assert_eq!(sort_key.as_ref(), &p_sort_key);
        let sort_key = cache
            .sort_key(Arc::clone(&cached_table), p_id, &[c1.column.id], None)
            .await;
        assert_eq!(
            sort_key.as_ref().unwrap().as_ref(),
            &PartitionSortKey {
                sort_key: Arc::new(p_sort_key.clone().unwrap()),
                column_set: HashSet::from([c1.column.id, c2.column.id]),
                column_order: vec![c1.column.id, c2.column.id],
            }
        );
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 3);

        // subsets and the full key don't expire
        for should_cover in [Vec::new(), vec!["foo"], vec!["bar"], vec!["foo", "bar"]] {
            let sort_key = cache.sort_key(p_id, &should_cover, None).await;
            assert_eq!(sort_key.as_ref(), &p_sort_key);
        for should_cover in [
            Vec::new(),
            vec![c1.column.id],
            vec![c2.column.id],
            vec![c1.column.id, c2.column.id],
        ] {
            let sort_key_2 = cache
                .sort_key(Arc::clone(&cached_table), p_id, &should_cover, None)
                .await;
            assert!(Arc::ptr_eq(
                sort_key.as_ref().unwrap(),
                sort_key_2.as_ref().unwrap()
            ));
            assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 3);
        }

        // unknown columns expire
        let sort_key = cache.sort_key(p_id, &["foo", "x"], None).await;
        assert_eq!(sort_key.as_ref(), &p_sort_key);
        let c3 = t.create_column("x", ColumnType::Tag).await;
        let sort_key_2 = cache
            .sort_key(
                Arc::clone(&cached_table),
                p_id,
                &[c1.column.id, c3.column.id],
                None,
            )
            .await;
        assert!(!Arc::ptr_eq(
            sort_key.as_ref().unwrap(),
            sort_key_2.as_ref().unwrap()
        ));
        assert_eq!(sort_key, sort_key_2);
        assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 4);
    }

    fn schema() -> Arc<Schema> {
        Arc::new(SchemaBuilder::new().build().unwrap())
    }
}
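A refactor that recurs through the rest of this commit is flipping `Arc<Option<SortKey>>` into `Option<Arc<SortKey>>`: the absent case then allocates nothing, present values can be shared and compared by pointer, and the awkward double `as_ref()` disappears. A minimal illustration of the difference:

    use std::sync::Arc;

    fn main() {
        // Option<Arc<T>>: cloning shares one allocation, which the tests above
        // exploit via Arc::ptr_eq to prove the cache returned the same value.
        let a: Option<Arc<String>> = Some(Arc::new("tag,time".to_owned()));
        let b = a.clone();
        assert!(Arc::ptr_eq(a.as_ref().unwrap(), b.as_ref().unwrap()));

        // Arc<Option<T>>: even "no sort key" keeps a live allocation, and the
        // inner value is only reachable through a double as_ref().
        let c: Arc<Option<String>> = Arc::new(None);
        assert!(c.as_ref().as_ref().is_none());
    }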
@ -10,7 +10,7 @@ use iox_catalog::interface::Catalog;
use iox_query::util::create_basic_summary;
use parquet_file::chunk::ParquetChunk;
use schema::{sort::SortKey, Schema};
use std::{collections::HashMap, sync::Arc};
use std::{collections::HashSet, sync::Arc};
use trace::span::{Span, SpanRecorder};
use uuid::Uuid;

@ -93,7 +93,7 @@ pub struct QuerierChunk {
    delete_predicates: Vec<Arc<DeletePredicate>>,

    /// Partition sort key (how does the read buffer use this?)
    partition_sort_key: Arc<Option<SortKey>>,
    partition_sort_key: Option<Arc<SortKey>>,

    /// Chunk of the Parquet file
    parquet_chunk: Arc<ParquetChunk>,

@ -107,7 +107,7 @@ impl QuerierChunk {
    pub fn new(
        parquet_chunk: Arc<ParquetChunk>,
        meta: Arc<ChunkMeta>,
        partition_sort_key: Arc<Option<SortKey>>,
        partition_sort_key: Option<Arc<SortKey>>,
    ) -> Self {
        let schema = parquet_chunk.schema();

@ -141,7 +141,7 @@ impl QuerierChunk {
    }

    /// Set partition sort key
    pub fn with_partition_sort_key(self, partition_sort_key: Arc<Option<SortKey>>) -> Self {
    pub fn with_partition_sort_key(self, partition_sort_key: Option<Arc<SortKey>>) -> Self {
        Self {
            partition_sort_key,
            ..self

@ -227,37 +227,26 @@ impl ChunkAdapter {
    ) -> Option<ChunkParts> {
        let span_recorder = SpanRecorder::new(span);

        let parquet_file_cols: HashMap<ColumnId, &str> = parquet_file
            .column_set
            .iter()
            .map(|id| {
                let name = cached_table
                    .column_id_map
                    .get(id)
                    .expect("catalog has all columns")
                    .as_ref();
                (*id, name)
            })
            .collect();
        let parquet_file_cols: HashSet<ColumnId> =
            parquet_file.column_set.iter().copied().collect();

        // relevant_pk_columns is everything from the primary key for the table, that is actually in this parquet file
        let relevant_pk_columns: Vec<_> = cached_table
            .primary_key_column_ids
            .iter()
            .filter_map(|c| parquet_file_cols.get(c).copied())
            .filter(|c| parquet_file_cols.contains(c))
            .copied()
            .collect();
        let partition_sort_key = self
            .catalog_cache
            .partition()
            .sort_key(
                Arc::clone(&cached_table),
                parquet_file.partition_id,
                &relevant_pk_columns,
                span_recorder.child_span("cache GET partition sort key"),
            )
            .await;
        let partition_sort_key_ref = partition_sort_key
            .as_ref()
            .as_ref()
            .await
            .expect("partition sort key should be set when a parquet file exists");

        // NOTE: Because we've looked up the sort key AFTER the namespace schema, it may contain columns for which we

@ -272,22 +261,28 @@ impl ChunkAdapter {
        let column_ids: Vec<_> = cached_table
            .column_id_map
            .keys()
            .filter(|id| parquet_file_cols.contains_key(id))
            .filter(|id| parquet_file_cols.contains(id))
            .copied()
            .collect();
        let schema = self
            .catalog_cache
            .projected_schema()
            .get(
                cached_table,
                Arc::clone(&cached_table),
                column_ids,
                span_recorder.child_span("cache GET projected schema"),
            )
            .await;

        // calculate sort key
        let pk_cols = schema.primary_key();
        let sort_key = partition_sort_key_ref.filter_to(&pk_cols, parquet_file.partition_id.get());
        let sort_key = SortKey::from_columns(
            partition_sort_key
                .column_order
                .iter()
                .filter(|c_id| parquet_file_cols.contains(c_id))
                .filter_map(|c_id| cached_table.column_id_map.get(c_id))
                .cloned(),
        );
        assert!(
            !sort_key.is_empty(),
            "Sort key can never be empty because there should at least be a time column",
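The chunk-level sort key is now derived by projecting the partition-wide column order down to the columns present in this parquet file and mapping the surviving IDs back to names. A standalone sketch of that projection, with `i64` IDs and a plain map standing in for the cached table:

    use std::collections::{HashMap, HashSet};

    // Keep the partition's column order, but only for columns in this file.
    fn chunk_sort_columns(
        partition_order: &[i64],
        file_cols: &HashSet<i64>,
        id_to_name: &HashMap<i64, &str>,
    ) -> Vec<String> {
        partition_order
            .iter()
            .filter(|id| file_cols.contains(id))
            .filter_map(|id| id_to_name.get(id))
            .map(|name| name.to_string())
            .collect()
    }

    fn main() {
        let id_to_name = HashMap::from([(1, "tag"), (2, "time")]);
        let file_cols: HashSet<i64> = [2].into_iter().collect();
        // Only "time" survives; the order still follows the partition sort key.
        assert_eq!(
            chunk_sort_columns(&[1, 2], &file_cols, &id_to_name),
            vec!["time".to_string()]
        );
    }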
@ -311,7 +306,7 @@ impl ChunkAdapter {
        Some(ChunkParts {
            meta,
            schema,
            partition_sort_key,
            partition_sort_key: Some(Arc::clone(&partition_sort_key.sort_key)),
        })
    }
}

@ -319,7 +314,7 @@ impl ChunkAdapter {
struct ChunkParts {
    meta: Arc<ChunkMeta>,
    schema: Arc<Schema>,
    partition_sort_key: Arc<Option<SortKey>>,
    partition_sort_key: Option<Arc<SortKey>>,
}

#[cfg(test)]

@ -414,6 +409,7 @@ pub mod tests {
        table.create_column("tag1", ColumnType::Tag).await;
        table.create_column("tag2", ColumnType::Tag).await;
        table.create_column("tag3", ColumnType::Tag).await;
        table.create_column("tag4", ColumnType::Tag).await;
        table.create_column("field_int", ColumnType::I64).await;
        table.create_column("field_float", ColumnType::F64).await;
        table.create_column("time", ColumnType::Time).await;

@ -19,7 +19,7 @@ impl QueryChunkMeta for QuerierChunk {
    }

    fn partition_sort_key(&self) -> Option<&SortKey> {
        self.partition_sort_key.as_ref().as_ref()
        self.partition_sort_key.as_ref().map(|sk| sk.as_ref())
    }

    fn partition_id(&self) -> PartitionId {
@ -3,14 +3,14 @@ use self::{
    flight_client::{Error as FlightClientError, FlightClient, FlightClientImpl, FlightError},
    test_util::MockIngesterConnection,
};
use crate::cache::CatalogCache;
use crate::cache::{namespace::CachedTable, CatalogCache};
use arrow::{datatypes::DataType, error::ArrowError, record_batch::RecordBatch};
use async_trait::async_trait;
use backoff::{Backoff, BackoffConfig, BackoffError};
use client_util::connection;
use data_types::{
    ChunkId, ChunkOrder, IngesterMapping, NamespaceId, PartitionId, SequenceNumber, ShardId,
    ShardIndex, TableId, TableSummary, TimestampMinMax,
    ShardIndex, TableSummary, TimestampMinMax,
};
use datafusion::error::DataFusionError;
use futures::{stream::FuturesUnordered, TryStreamExt};

@ -206,10 +206,9 @@ pub trait IngesterConnection: std::fmt::Debug + Send + Sync + 'static {
        &self,
        shard_indexes: Option<Vec<ShardIndex>>,
        namespace_id: NamespaceId,
        table_id: TableId,
        cached_table: Arc<CachedTable>,
        columns: Vec<String>,
        predicate: &Predicate,
        expected_schema: Arc<Schema>,
        span: Option<Span>,
    ) -> Result<Vec<IngesterPartition>>;

@ -328,7 +327,7 @@ impl<'a> Drop for ObserveIngesterRequest<'a> {
        debug!(
            predicate=?self.request.predicate,
            namespace_id=self.request.namespace_id.get(),
            table_id=self.request.table_id.get(),
            table_id=self.request.cached_table.id.get(),
            n_partitions=?ok_status.map(|s| s.n_partitions),
            n_chunks=?ok_status.map(|s| s.n_chunks),
            n_rows=?ok_status.map(|s| s.n_rows),

@ -460,10 +459,9 @@ struct GetPartitionForIngester<'a> {
    catalog_cache: Arc<CatalogCache>,
    ingester_address: Arc<str>,
    namespace_id: NamespaceId,
    table_id: TableId,
    columns: Vec<String>,
    predicate: &'a Predicate,
    expected_schema: Arc<Schema>,
    cached_table: Arc<CachedTable>,
}

/// Fetches the partitions for a single ingester

@ -476,15 +474,14 @@ async fn execute(
        catalog_cache,
        ingester_address,
        namespace_id,
        table_id,
        columns,
        predicate,
        expected_schema,
        cached_table,
    } = request;

    let ingester_query_request = IngesterQueryRequest {
        namespace_id,
        table_id,
        table_id: cached_table.id,
        columns: columns.clone(),
        predicate: Some(predicate.clone()),
    };
@ -502,7 +499,7 @@ async fn execute(
        warn!(
            ingester_address = ingester_address.as_ref(),
            namespace_id = namespace_id.get(),
            table_id = table_id.get(),
            table_id = cached_table.id.get(),
            "Could not connect to ingester, circuit broken",
        );
        return Ok(vec![]);

@ -513,7 +510,7 @@ async fn execute(
        debug!(
            ingester_address = ingester_address.as_ref(),
            namespace_id = namespace_id.get(),
            table_id = table_id.get(),
            table_id = cached_table.id.get(),
            "Ingester does not know namespace or table, skipping",
        );
        return Ok(vec![]);

@ -531,7 +528,7 @@ async fn execute(
            e=%e,
            ingester_address=ingester_address.as_ref(),
            namespace_id=namespace_id.get(),
            table_id=table_id.get(),
            table_id=cached_table.id.get(),
            columns=columns.join(",").as_str(),
            predicate_str=%predicate,
            predicate_binary=encode_predicate_as_base64(predicate).as_str(),

@ -560,7 +557,7 @@ async fn execute(
    let mut decoder = IngesterStreamDecoder::new(
        ingester_address,
        catalog_cache,
        expected_schema,
        cached_table,
        span_recorder.child_span("IngesterStreamDecoder"),
    );
    for (msg, md) in messages {

@ -580,7 +577,7 @@ struct IngesterStreamDecoder {
    current_chunk: Option<(Schema, Vec<RecordBatch>)>,
    ingester_address: Arc<str>,
    catalog_cache: Arc<CatalogCache>,
    expected_schema: Arc<Schema>,
    cached_table: Arc<CachedTable>,
    span_recorder: SpanRecorder,
}

@ -589,7 +586,7 @@ impl IngesterStreamDecoder {
    fn new(
        ingester_address: Arc<str>,
        catalog_cache: Arc<CatalogCache>,
        expected_schema: Arc<Schema>,
        cached_table: Arc<CachedTable>,
        span: Option<Span>,
    ) -> Self {
        Self {

@ -598,7 +595,7 @@ impl IngesterStreamDecoder {
            current_chunk: None,
            ingester_address,
            catalog_cache,
            expected_schema,
            cached_table,
            span_recorder: SpanRecorder::new(span),
        }
    }

@ -632,18 +629,27 @@ impl IngesterStreamDecoder {
            let primary_keys: Vec<_> = schemas.iter().map(|s| s.primary_key()).collect();
            let primary_key: Vec<_> = primary_keys
                .iter()
                .flat_map(|pk| pk.iter().copied())
                .flat_map(|pk| pk.iter())
                // cache may be older than the ingester response status, so some entries might be missing
                .filter_map(|name| {
                    self.cached_table
                        .column_id_map_rev
                        .get(&Arc::from(name.to_owned()))
                })
                .copied()
                .collect();
            let partition_sort_key = self
                .catalog_cache
                .partition()
                .sort_key(
                    Arc::clone(&self.cached_table),
                    current_partition.partition_id(),
                    &primary_key,
                    self.span_recorder
                        .child_span("cache GET partition sort key"),
                )
                .await;
                .await
                .map(|sort_key| Arc::clone(&sort_key.sort_key));
            let current_partition = current_partition.with_partition_sort_key(partition_sort_key);
            self.finished_partitions
                .insert(current_partition.partition_id, current_partition);
@ -679,6 +685,7 @@ impl IngesterStreamDecoder {
                    .catalog_cache
                    .partition()
                    .shard_id(
                        Arc::clone(&self.cached_table),
                        partition_id,
                        self.span_recorder
                            .child_span("cache GET partition shard ID"),

@ -687,7 +694,7 @@ impl IngesterStreamDecoder {

                // Use a temporary empty partition sort key. We are going to fetch this AFTER we know all chunks because
                // then we are able to detect all relevant primary key columns that the sort key must cover.
                let partition_sort_key = Arc::new(None);
                let partition_sort_key = None;

                let partition = IngesterPartition::new(
                    Arc::clone(&self.ingester_address),

@ -713,7 +720,8 @@ impl IngesterStreamDecoder {
                let column_names: Vec<_> =
                    schema.fields().iter().map(|f| f.name().as_str()).collect();
                let schema = self
                    .expected_schema
                    .cached_table
                    .schema
                    .select_by_names(&column_names)
                    .context(ConvertingSchemaSnafu)?;
                self.current_chunk = Some((schema, vec![]));

@ -775,10 +783,9 @@ impl IngesterConnection for IngesterConnectionImpl {
        &self,
        shard_indexes: Option<Vec<ShardIndex>>,
        namespace_id: NamespaceId,
        table_id: TableId,
        cached_table: Arc<CachedTable>,
        columns: Vec<String>,
        predicate: &Predicate,
        expected_schema: Arc<Schema>,
        span: Option<Span>,
    ) -> Result<Vec<IngesterPartition>> {
        let relevant_ingester_addresses = match shard_indexes {

@ -835,10 +842,9 @@ impl IngesterConnection for IngesterConnectionImpl {
                catalog_cache: Arc::clone(&self.catalog_cache),
                ingester_address: Arc::clone(&ingester_address),
                namespace_id,
                table_id,
                cached_table: Arc::clone(&cached_table),
                columns: columns.clone(),
                predicate,
                expected_schema: Arc::clone(&expected_schema),
            };

            let backoff_config = self.backoff_config.clone();

@ -965,7 +971,7 @@ pub struct IngesterPartition {
    tombstone_max_sequence_number: Option<SequenceNumber>,

    /// Partition-wide sort key.
    partition_sort_key: Arc<Option<SortKey>>,
    partition_sort_key: Option<Arc<SortKey>>,

    chunks: Vec<IngesterChunk>,
}

@ -979,7 +985,7 @@ impl IngesterPartition {
        shard_id: ShardId,
        parquet_max_sequence_number: Option<SequenceNumber>,
        tombstone_max_sequence_number: Option<SequenceNumber>,
        partition_sort_key: Arc<Option<SortKey>>,
        partition_sort_key: Option<Arc<SortKey>>,
    ) -> Self {
        Self {
            ingester,

@ -1029,7 +1035,7 @@ impl IngesterPartition {
            chunk_id,
            partition_id: self.partition_id,
            schema: expected_schema,
            partition_sort_key: Arc::clone(&self.partition_sort_key),
            partition_sort_key: self.partition_sort_key.clone(),
            batches,
            ts_min_max,
            summary,

@ -1041,13 +1047,13 @@ impl IngesterPartition {
    }

    /// Update partition sort key
    pub(crate) fn with_partition_sort_key(self, partition_sort_key: Arc<Option<SortKey>>) -> Self {
    pub(crate) fn with_partition_sort_key(self, partition_sort_key: Option<Arc<SortKey>>) -> Self {
        Self {
            partition_sort_key: Arc::clone(&partition_sort_key),
            partition_sort_key: partition_sort_key.clone(),
            chunks: self
                .chunks
                .into_iter()
                .map(|c| c.with_partition_sort_key(Arc::clone(&partition_sort_key)))
                .map(|c| c.with_partition_sort_key(partition_sort_key.clone()))
                .collect(),
            ..self
        }
@ -1089,7 +1095,7 @@ pub struct IngesterChunk {
    schema: Arc<Schema>,

    /// Partition-wide sort key.
    partition_sort_key: Arc<Option<SortKey>>,
    partition_sort_key: Option<Arc<SortKey>>,

    /// The raw table data
    batches: Vec<RecordBatch>,

@ -1102,7 +1108,7 @@ pub struct IngesterChunk {
}

impl IngesterChunk {
    pub(crate) fn with_partition_sort_key(self, partition_sort_key: Arc<Option<SortKey>>) -> Self {
    pub(crate) fn with_partition_sort_key(self, partition_sort_key: Option<Arc<SortKey>>) -> Self {
        Self {
            partition_sort_key,
            ..self

@ -1141,7 +1147,7 @@ impl QueryChunkMeta for IngesterChunk {
    }

    fn partition_sort_key(&self) -> Option<&SortKey> {
        self.partition_sort_key.as_ref().as_ref()
        self.partition_sort_key.as_ref().map(|sk| sk.as_ref())
    }

    fn partition_id(&self) -> PartitionId {

@ -1273,6 +1279,7 @@ mod tests {
        datatypes::Int32Type,
    };
    use assert_matches::assert_matches;
    use data_types::TableId;
    use generated_types::influxdata::iox::ingester::v1::PartitionStatus;
    use influxdb_iox_client::flight::generated_types::IngesterQueryResponseMetadata;
    use iox_tests::util::TestCatalog;

@ -1392,6 +1399,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: None,
                    }),
                    ingester_uuid: String::new(),
                },
            ))],
        }),

@ -1422,6 +1430,7 @@ mod tests {
                IngesterQueryResponseMetadata {
                    partition_id: 1,
                    status: None,
                    ingester_uuid: String::new(),
                },
            ))],
        }),

@ -1447,6 +1456,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: None,
                    }),
                    ingester_uuid: String::new(),
                },
            )),
            Ok((

@ -1456,6 +1466,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: None,
                    }),
                    ingester_uuid: String::new(),
                },
            )),
            Ok((

@ -1465,6 +1476,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: None,
                    }),
                    ingester_uuid: String::new(),
                },
            )),
        ],

@ -1544,6 +1556,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: Some(11),
                    }),
                    ingester_uuid: String::new(),
                },
            )),
            Ok((

@ -1573,6 +1586,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: Some(21),
                    }),
                    ingester_uuid: String::new(),
                },
            )),
            Ok((

@ -1597,6 +1611,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: Some(31),
                    }),
                    ingester_uuid: String::new(),
                },
            )),
            Ok((

@ -1776,6 +1791,7 @@ mod tests {
                    status: Some(PartitionStatus {
                        parquet_max_sequence_number: Some(11),
                    }),
                    ingester_uuid: String::new(),
                },
            )),
            Ok((

@ -1831,16 +1847,14 @@ mod tests {
        span: Option<Span>,
    ) -> Result<Vec<IngesterPartition>, Error> {
        let columns = vec![String::from("col")];
        let schema = schema();
        let shard_indexes: Vec<_> = shard_indexes.iter().copied().map(ShardIndex::new).collect();
        ingester_conn
            .partitions(
                Some(shard_indexes),
                NamespaceId::new(1),
                TableId::new(2),
                cached_table(),
                columns,
                &Predicate::default(),
                schema,
                span,
            )
            .await

@ -1987,7 +2001,7 @@ mod tests {
            ShardId::new(1),
            parquet_max_sequence_number,
            tombstone_max_sequence_number,
            Arc::new(None),
            None,
        )
        .try_add_chunk(ChunkId::new(), Arc::clone(&expected_schema), vec![case])
        .unwrap();

@ -2020,7 +2034,7 @@ mod tests {
            ShardId::new(1),
            parquet_max_sequence_number,
            tombstone_max_sequence_number,
            Arc::new(None),
            None,
        )
        .try_add_chunk(ChunkId::new(), Arc::clone(&expected_schema), vec![batch])
        .unwrap_err();

@ -2060,4 +2074,14 @@ mod tests {
    fn i64_vec() -> &'static [Option<i64>] {
        &[Some(1), Some(2), Some(3)]
    }

    fn cached_table() -> Arc<CachedTable> {
        Arc::new(CachedTable {
            id: TableId::new(2),
            schema: schema(),
            column_id_map: Default::default(),
            column_id_map_rev: Default::default(),
            primary_key_column_ids: Default::default(),
        })
    }
}
@ -1,8 +1,9 @@
use crate::cache::namespace::CachedTable;

use super::IngesterConnection;
use async_trait::async_trait;
use data_types::NamespaceId;
use data_types::ShardIndex;
use data_types::TableId;
use generated_types::influxdata::iox::ingester::v1::GetWriteInfoResponse;
use iox_query::util::create_basic_summary;
use parking_lot::Mutex;

@ -36,10 +37,9 @@ impl IngesterConnection for MockIngesterConnection {
        &self,
        _shard_indexes: Option<Vec<ShardIndex>>,
        _namespace_id: NamespaceId,
        _table_id: TableId,
        _cached_table: Arc<CachedTable>,
        columns: Vec<String>,
        _predicate: &predicate::Predicate,
        _expected_schema: Arc<schema::Schema>,
        _span: Option<Span>,
    ) -> super::Result<Vec<super::IngesterPartition>> {
        // see if we want to do projection pushdown
@ -122,9 +122,6 @@ pub struct QuerierTable {
    /// Interface to create chunks for this table.
    chunk_adapter: Arc<ChunkAdapter>,

    /// Handle reconciling ingester and catalog data
    reconciler: Reconciler,

    /// Executor for queries.
    exec: Arc<Executor>,

@ -149,12 +146,6 @@ impl QuerierTable {
            prune_metrics,
        } = args;

        let reconciler = Reconciler::new(
            Arc::clone(&table_name),
            Arc::clone(&namespace_name),
            Arc::clone(&chunk_adapter),
        );

        Self {
            sharder,
            namespace_name,

@ -165,7 +156,6 @@ impl QuerierTable {
            schema,
            ingester_connection,
            chunk_adapter,
            reconciler,
            exec,
            prune_metrics,
        }

@ -259,7 +249,7 @@ impl QuerierTable {
            self.ingester_partitions(
                &predicate,
                span_recorder.child_span("ingester partitions"),
                projection
                projection,
            ),
            catalog_cache.parquet_file().get(
                self.id(),

@ -324,82 +314,87 @@ impl QuerierTable {
                span_recorder.child_span("cache GET namespace schema"),
            )
            .await;
        let cached_table = cached_namespace
        let Some(cached_table) = cached_namespace
            .as_ref()
            .and_then(|ns| ns.tables.get(self.table_name.as_ref()));
            .and_then(|ns| ns.tables.get(self.table_name.as_ref())) else {
            return Ok(vec![]);
        };

        let reconciler = Reconciler::new(
            Arc::clone(&self.table_name),
            Arc::clone(&self.namespace_name),
            Arc::clone(&self.chunk_adapter),
            Arc::clone(cached_table),
        );

        // create parquet files
        let parquet_files: Vec<_> = match cached_table {
            Some(cached_table) => {
                // use nested scope because we span many child scopes here and it's easier to
                // aggregate / collapse in the UI
                let span_recorder = span_recorder.child("parquet chunks");
        let parquet_files: Vec<_> = {
            // use nested scope because we span many child scopes here and it's easier to
            // aggregate / collapse in the UI
            let span_recorder = span_recorder.child("parquet chunks");

                let basic_summaries: Vec<_> = parquet_files
                    .files
                    .iter()
                    .map(|p| {
                        Arc::new(create_basic_summary(
                            p.row_count as u64,
                            &cached_table.schema,
                            TimestampMinMax {
                                min: p.min_time.get(),
                                max: p.max_time.get(),
                            },
                        ))
                    })
                    .collect();
            let basic_summaries: Vec<_> = parquet_files
                .files
                .iter()
                .map(|p| {
                    Arc::new(create_basic_summary(
                        p.row_count as u64,
                        &cached_table.schema,
                        TimestampMinMax {
                            min: p.min_time.get(),
                            max: p.max_time.get(),
                        },
                    ))
                })
                .collect();

                // Prune on the most basic summary data (timestamps and column names) before trying to fully load the chunks
                let keeps = match prune_summaries(
                    Arc::clone(&cached_table.schema),
                    &basic_summaries,
                    &predicate,
                ) {
                    Ok(keeps) => keeps,
                    Err(reason) => {
                        // Ignore pruning failures here - the chunk pruner should have already logged them.
                        // Just skip pruning and gather all the metadata. We have another chance to prune them
                        // once all the metadata is available
                        debug!(?reason, "Could not prune before metadata fetch");
                        vec![true; basic_summaries.len()]
            // Prune on the most basic summary data (timestamps and column names) before trying to fully load the chunks
            let keeps = match prune_summaries(
                Arc::clone(&cached_table.schema),
                &basic_summaries,
                &predicate,
            ) {
                Ok(keeps) => keeps,
                Err(reason) => {
                    // Ignore pruning failures here - the chunk pruner should have already logged them.
                    // Just skip pruning and gather all the metadata. We have another chance to prune them
                    // once all the metadata is available
                    debug!(?reason, "Could not prune before metadata fetch");
                    vec![true; basic_summaries.len()]
                }
            };

            let early_pruning_observer =
                &MetricPruningObserver::new(Arc::clone(&self.prune_metrics));

            futures::stream::iter(parquet_files.files.iter().cloned().zip(keeps))
                .filter(|(cached_parquet_file, keep)| {
                    if !keep {
                        early_pruning_observer.was_pruned_early(
                            cached_parquet_file.row_count as u64,
                            cached_parquet_file.file_size_bytes as u64,
                        );
                    }
                };

                let early_pruning_observer =
                    &MetricPruningObserver::new(Arc::clone(&self.prune_metrics));

                futures::stream::iter(parquet_files.files.iter().cloned().zip(keeps))
                    .filter(|(cached_parquet_file, keep)| {
                        if !keep {
                            early_pruning_observer.was_pruned_early(
                                cached_parquet_file.row_count as u64,
                                cached_parquet_file.file_size_bytes as u64,
                            );
                        }

                        let keep = *keep;
                        async move { keep }
                    })
                    .map(|(cached_parquet_file, _keep)| {
                        let span_recorder = &span_recorder;
                        async move {
                            let span = span_recorder.child_span("new_chunk");
                            self.chunk_adapter
                                .new_chunk(Arc::clone(cached_table), cached_parquet_file, span)
                                .await
                        }
                    })
                    .buffer_unordered(CONCURRENT_CHUNK_CREATION_JOBS)
                    .filter_map(|x| async { x })
                    .collect()
                    .await
            }
            _ => Vec::new(),
                    let keep = *keep;
                    async move { keep }
                })
                .map(|(cached_parquet_file, _keep)| {
                    let span_recorder = &span_recorder;
                    async move {
                        let span = span_recorder.child_span("new_chunk");
                        self.chunk_adapter
                            .new_chunk(Arc::clone(cached_table), cached_parquet_file, span)
                            .await
                    }
                })
                .buffer_unordered(CONCURRENT_CHUNK_CREATION_JOBS)
                .filter_map(|x| async { x })
                .collect()
                .await
        };

        let chunks = self
            .reconciler
        let chunks = reconciler
            .reconcile(
                partitions,
                tombstones.to_vec(),
@ -416,7 +411,8 @@ impl QuerierTable {
            .chunk_pruner()
            .prune_chunks(
                self.table_name(),
                Arc::clone(&self.schema),
                // use up-to-date schema
                Arc::clone(&cached_table.schema),
                chunks,
                &predicate,
            )

@ -487,15 +483,17 @@ impl QuerierTable {
            .as_ref()
            .map(|sharder| vec![**sharder.shard_for_query(&self.table_name, &self.namespace_name)]);

        // get cached table w/o any must-coverage information
        let Some(cached_table) = self
            .chunk_adapter
            .catalog_cache()
            .namespace()
            .get(
                Arc::clone(&self.namespace_name),
                &[],
                span_recorder.child_span("get namespace"),
            )
            .await
            .and_then(|ns| ns.tables.get(&self.table_name).cloned())
        else {
            return Ok(vec![]);
        };

        // get any chunks from the ingester(s)
        let partitions_result = ingester_connection
            .partitions(
                shard_indexes,
                self.namespace_id,
                self.table_id,
                cached_table,
                columns,
                predicate,
                Arc::clone(&self.schema),
                span_recorder.child_span("IngesterConnection partitions"),
            )
            .await
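The early return when the namespace or table is unknown uses Rust's `let .. else` form (stable since 1.65): the success arm binds `cached_table`, everything else diverges immediately. A minimal sketch of the pattern outside this codebase:

    use std::collections::HashMap;

    fn chunks_for(tables: &HashMap<String, i64>, name: &str) -> Result<Vec<i64>, String> {
        // Bind the table ID if present; otherwise return early with no chunks,
        // mirroring the `else { return Ok(vec![]) }` arms above.
        let Some(id) = tables.get(name) else {
            return Ok(vec![]);
        };
        Ok(vec![*id])
    }

    fn main() {
        let tables = HashMap::from([("cpu".to_owned(), 7)]);
        assert_eq!(chunks_for(&tables, "cpu").unwrap(), vec![7]);
        assert!(chunks_for(&tables, "mem").unwrap().is_empty());
    }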
@ -2,7 +2,9 @@
|
|||
|
||||
mod interface;
|
||||
|
||||
use data_types::{CompactionLevel, DeletePredicate, PartitionId, ShardId, Tombstone, TombstoneId};
|
||||
use data_types::{
|
||||
ColumnId, CompactionLevel, DeletePredicate, PartitionId, ShardId, Tombstone, TombstoneId,
|
||||
};
|
||||
use iox_query::QueryChunk;
|
||||
use observability_deps::tracing::debug;
|
||||
use schema::sort::SortKey;
|
||||
|
@ -14,6 +16,7 @@ use std::{
|
|||
use trace::span::{Span, SpanRecorder};
|
||||
|
||||
use crate::{
|
||||
cache::namespace::CachedTable,
|
||||
chunk::{ChunkAdapter, QuerierChunk},
|
||||
ingester::IngesterChunk,
|
||||
tombstone::QuerierTombstone,
|
||||
|
@ -35,6 +38,7 @@ pub struct Reconciler {
|
|||
table_name: Arc<str>,
|
||||
namespace_name: Arc<str>,
|
||||
chunk_adapter: Arc<ChunkAdapter>,
|
||||
cached_table: Arc<CachedTable>,
|
||||
}
|
||||
|
||||
impl Reconciler {
|
||||
|
@ -42,11 +46,13 @@ impl Reconciler {
|
|||
table_name: Arc<str>,
|
||||
namespace_name: Arc<str>,
|
||||
chunk_adapter: Arc<ChunkAdapter>,
|
||||
cached_table: Arc<CachedTable>,
|
||||
) -> Self {
|
||||
Self {
|
||||
table_name,
|
||||
namespace_name,
|
||||
chunk_adapter,
|
||||
cached_table,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -233,31 +239,32 @@ impl Reconciler {
         let span_recorder = SpanRecorder::new(span);

         // collect columns
-        let chunk_schemas: Vec<_> = chunks
-            .iter()
-            .map(|c| (c.partition_id(), c.schema()))
-            .collect();
-        let mut all_columns: HashMap<PartitionId, Vec<&str>> = HashMap::new();
-        for (partition_id, schema) in &chunk_schemas {
+        let mut all_columns: HashMap<PartitionId, HashSet<ColumnId>> = HashMap::new();
+        for c in &chunks {
             // columns for this partition MUST include the primary key of this chunk
-            all_columns
-                .entry(*partition_id)
-                .or_default()
-                .extend(schema.primary_key().iter());
+            let schema = c.schema();
+            let pk = schema
+                .primary_key()
+                .into_iter()
+                .filter_map(|name| self.cached_table.column_id_map_rev.get(name).copied());
+            all_columns.entry(c.partition_id()).or_default().extend(pk);
         }

         // get cached (or fresh) sort keys
         let partition_cache = self.chunk_adapter.catalog_cache().partition();
-        let mut sort_keys: HashMap<PartitionId, Arc<Option<SortKey>>> =
+        let mut sort_keys: HashMap<PartitionId, Option<Arc<SortKey>>> =
             HashMap::with_capacity(all_columns.len());
         for (partition_id, columns) in all_columns.into_iter() {
+            let columns: Vec<ColumnId> = columns.into_iter().collect();
             let sort_key = partition_cache
                 .sort_key(
+                    Arc::clone(&self.cached_table),
                     partition_id,
                    &columns,
                    span_recorder.child_span("cache GET partition sort key"),
                )
-                .await;
+                .await
+                .map(|sk| Arc::clone(&sk.sort_key));
            sort_keys.insert(partition_id, sort_key);
        }

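
The rewritten loop resolves column names from each chunk's primary key to catalog `ColumnId`s through a reverse map, silently skipping names the catalog no longer knows. A minimal sketch of that resolution step (toy types; `column_id_map_rev` here is just a plain `HashMap<&str, u64>`, not the codebase's type):

use std::collections::{HashMap, HashSet};

fn main() {
    // Hypothetical reverse map from column name to catalog column id.
    let column_id_map_rev: HashMap<&str, u64> =
        HashMap::from([("host", 1), ("region", 2), ("time", 3)]);

    // Primary key reported by a chunk's schema; "stale" has no catalog id.
    let primary_key = ["host", "stale", "time"];

    // filter_map drops unknown names instead of failing the whole query.
    let ids: HashSet<u64> = primary_key
        .into_iter()
        .filter_map(|name| column_id_map_rev.get(name).copied())
        .collect();

    assert_eq!(ids, HashSet::from([1, 3]));
}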
@@ -269,7 +276,7 @@ impl Reconciler {
                 let sort_key = sort_keys
                     .get(&partition_id)
                     .expect("sort key for this partition should be fetched by now");
-                chunk.update_partition_sort_key(Arc::clone(sort_key))
+                chunk.update_partition_sort_key(sort_key.clone())
             })
             .collect()
     }
@@ -288,7 +295,7 @@
 trait UpdatableQuerierChunk: QueryChunk {
     fn update_partition_sort_key(
         self: Box<Self>,
-        sort_key: Arc<Option<SortKey>>,
+        sort_key: Option<Arc<SortKey>>,
     ) -> Box<dyn UpdatableQuerierChunk>;

     fn upcast_to_querier_chunk(self: Box<Self>) -> Box<dyn QueryChunk>;
@@ -297,7 +304,7 @@ trait UpdatableQuerierChunk: QueryChunk {
 impl UpdatableQuerierChunk for QuerierChunk {
     fn update_partition_sort_key(
         self: Box<Self>,
-        sort_key: Arc<Option<SortKey>>,
+        sort_key: Option<Arc<SortKey>>,
     ) -> Box<dyn UpdatableQuerierChunk> {
         Box::new(self.with_partition_sort_key(sort_key))
     }
@@ -310,7 +317,7 @@ impl UpdatableQuerierChunk for QuerierChunk {
 impl UpdatableQuerierChunk for IngesterChunk {
     fn update_partition_sort_key(
         self: Box<Self>,
-        sort_key: Arc<Option<SortKey>>,
+        sort_key: Option<Arc<SortKey>>,
     ) -> Box<dyn UpdatableQuerierChunk> {
         Box::new(self.with_partition_sort_key(sort_key))
     }

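
The repeated `Arc<Option<SortKey>>` → `Option<Arc<SortKey>>` change throughout these hunks swaps the nesting so that "no sort key" is representable without an allocation, while a present key can still be shared cheaply. A minimal sketch of the difference (standalone toy code; this `SortKey` stands in for any shared value and is not the codebase's type):

use std::sync::Arc;

#[derive(Debug)]
struct SortKey(Vec<String>);

fn main() {
    // Before: the Option lives *inside* the Arc, so even the "absent" case
    // carries a heap allocation and a refcount.
    let old: Arc<Option<SortKey>> = Arc::new(None);

    // After: the Option wraps the Arc, so `None` is a plain value and
    // `Some(arc)` clones by bumping a refcount, never deep-copying the key.
    let new: Option<Arc<SortKey>> = Some(Arc::new(SortKey(vec!["host".into()])));
    let shared = new.clone(); // cheap: refcount bump only

    println!("{old:?} {new:?} {shared:?}");
}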
@@ -69,7 +69,7 @@ pub(crate) struct IngesterPartitionBuilder {
     ingester_name: Arc<str>,
     ingester_chunk_id: u128,

-    partition_sort_key: Arc<Option<SortKey>>,
+    partition_sort_key: Option<Arc<SortKey>>,

     /// Data returned from the partition, in line protocol format
     lp: Vec<String>,
@@ -86,7 +86,7 @@ impl IngesterPartitionBuilder {
             shard: Arc::clone(shard),
             partition: Arc::clone(partition),
             ingester_name: Arc::from("ingester1"),
-            partition_sort_key: Arc::new(None),
+            partition_sort_key: None,
             ingester_chunk_id: 1,
             lp: Vec::new(),
         }
@@ -129,7 +129,7 @@ impl IngesterPartitionBuilder {
             self.shard.shard.id,
             parquet_max_sequence_number,
             tombstone_max_sequence_number,
-            Arc::clone(&self.partition_sort_key),
+            self.partition_sort_key.clone(),
         )
         .try_add_chunk(
             ChunkId::new_test(self.ingester_chunk_id),

@@ -1031,6 +1031,8 @@ impl QueryDataAdapter {
                     .parquet_max_sequence_number
                     .map(|x| x.get()),
                 }),
+                // Only used in ingester2.
+                ingester_uuid: String::new(),
             },
         ),
         FlatIngesterQueryResponse::StartSnapshot { schema } => (

@@ -50,7 +50,7 @@ criterion = { version = "0.4", default-features = false, features = ["async_toki
 influxdb_line_protocol = { path = "../influxdb_line_protocol" }
 iox_tests = { path = "../iox_tests" }
 once_cell = "1"
-paste = "1.0.9"
+paste = "1.0.10"
 pretty_assertions = "1.3.0"
 rand = "0.8.3"
 schema = { path = "../schema" }

@@ -11,6 +11,7 @@ arrow_util = { path = "../arrow_util" }
 assert_cmd = "2.0.7"
 bytes = "1.3"
 data_types = { path = "../data_types" }
+escargot = "0.5"
 futures = "0.3"
 generated_types = { path = "../generated_types" }
 http = "0.2.8"
@@ -30,3 +31,8 @@ tokio = { version = "1.22", features = ["macros", "net", "parking_lot", "rt-mult
 tokio-util = "0.7"
 tonic = "0.8"
 workspace-hack = { path = "../workspace-hack"}
+
+[features]
+# Temporary feature to use the RPC write path instead of the write buffer during the transition
+# away from using Kafka.
+rpc_write = []

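
With the `rpc_write` cargo feature declared, transitional code paths can be compiled in or out per build. A minimal sketch of how such a gate is typically consumed in Rust (illustrative only; `use_rpc_write_path` is a made-up function, not part of this codebase):

// Compiled only when built with `--features rpc_write`.
#[cfg(feature = "rpc_write")]
fn use_rpc_write_path() -> bool {
    true
}

// Fallback when the feature is disabled: keep using the write buffer.
#[cfg(not(feature = "rpc_write"))]
fn use_rpc_write_path() -> bool {
    false
}

fn main() {
    println!("RPC write path enabled: {}", use_rpc_write_path());
}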
@@ -29,6 +29,9 @@ pub struct TestConfig {
     /// Object store directory, if needed.
     object_store_dir: Option<Arc<TempDir>>,

+    /// WAL directory, if needed.
+    wal_dir: Option<Arc<TempDir>>,
+
     /// Which ports this server should use
     addrs: Arc<BindAddresses>,
 }
@@ -49,6 +52,7 @@ impl TestConfig {
             catalog_schema_name: catalog_schema_name.into(),
             write_buffer_dir: None,
             object_store_dir: None,
+            wal_dir: None,
             addrs: Arc::new(BindAddresses::default()),
         }
     }
@@ -61,6 +65,26 @@ impl TestConfig {
         .with_new_object_store()
     }

+    /// Create a minimal router configuration sharing configuration with the ingester config
+    pub fn new_router_rpc_write(ingester_config: &TestConfig) -> Self {
+        assert_eq!(ingester_config.server_type(), ServerType::IngesterRpcWrite);
+
+        Self::new(
+            ServerType::RouterRpcWrite,
+            ingester_config.dsn().to_owned(),
+            ingester_config.catalog_schema_name(),
+        )
+        .with_existing_object_store(ingester_config)
+        .with_env(
+            "INFLUXDB_IOX_INGESTER_ADDRESSES",
+            ingester_config
+                .addrs()
+                .ingester_grpc_api()
+                .bind_addr()
+                .as_ref(),
+        )
+    }
+
     /// Create a minimal ingester configuration, using the dsn and
     /// write buffer configuration from other
     pub fn new_ingester(other: &TestConfig) -> Self {
@@ -74,6 +98,19 @@ impl TestConfig {
         .with_default_ingester_options()
     }

+    /// Create a minimal ingester configuration, using the dsn configuration from other
+    pub fn new_ingester_rpc_write(dsn: impl Into<String>) -> Self {
+        let dsn = Some(dsn.into());
+        Self::new(
+            ServerType::IngesterRpcWrite,
+            dsn,
+            random_catalog_schema_name(),
+        )
+        .with_new_object_store()
+        .with_new_wal()
+        .with_default_ingester_options()
+    }
+
     /// Create a minimal querier configuration from the specified
     /// ingester configuration, using the same dsn and object store,
     /// and pointing at the specified ingester
@@ -267,9 +304,18 @@ impl TestConfig {
         self
     }

-    /// Configures a new objct store
+    /// Configures a new WAL
+    pub fn with_new_wal(mut self) -> Self {
+        let tmpdir = TempDir::new().expect("cannot create tmp dir");
+
+        let wal_string = tmpdir.path().display().to_string();
+        self.wal_dir = Some(Arc::new(tmpdir));
+        self.with_env("INFLUXDB_IOX_WAL_DIRECTORY", &wal_string)
+    }
+
+    /// Configures a new object store
     pub fn with_new_object_store(mut self) -> Self {
-        let tmpdir = TempDir::new().expect("can not create tmp dir");
+        let tmpdir = TempDir::new().expect("cannot create tmp dir");

         let object_store_string = tmpdir.path().display().to_string();
         self.object_store_dir = Some(Arc::new(tmpdir));

@@ -152,6 +152,18 @@ impl MiniCluster {
         .with_compactor_config(compactor_config)
     }

+    pub async fn create_non_shared_rpc_write(database_url: String) -> Self {
+        let ingester_config = TestConfig::new_ingester_rpc_write(&database_url);
+        let router_config = TestConfig::new_router_rpc_write(&ingester_config);
+
+        // Set up the cluster ====================================
+        Self::new()
+            .with_ingester(ingester_config)
+            .await
+            .with_router(router_config)
+            .await
+    }
+
     /// Create an all-(minus compactor)-in-one server with the specified configuration
     pub async fn create_all_in_one(test_config: TestConfig) -> Self {
         Self::new()

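
A hedged usage sketch of the new constructor, as it might appear in an end-to-end test (the test name and the `DATABASE_URL` environment variable are assumptions for illustration; the real test suite's setup conventions are not shown in this diff):

// Hypothetical test: brings up an RPC-write ingester plus a router pointed at it.
#[tokio::test]
async fn basic_rpc_write_cluster() {
    // Assumed to point at a running Postgres catalog for integration tests.
    let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");

    let cluster = MiniCluster::create_non_shared_rpc_write(database_url).await;

    // ... write line protocol via the router and query the ingester ...
    drop(cluster);
}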
@@ -1,4 +1,3 @@
-use assert_cmd::prelude::*;
 use futures::prelude::*;
 use influxdb_iox_client::connection::Connection;
 use observability_deps::tracing::{info, warn};
@@ -7,7 +6,7 @@ use std::{
     fs::OpenOptions,
     ops::DerefMut,
     path::Path,
-    process::{Child, Command},
+    process::Child,
     str,
     sync::{Arc, Weak},
     time::Duration,
@@ -185,7 +184,7 @@ impl Connections {
         let server_type = test_config.server_type();

         self.router_grpc_connection = match server_type {
-            ServerType::AllInOne | ServerType::Router => {
+            ServerType::AllInOne | ServerType::Router | ServerType::RouterRpcWrite => {
                 let client_base = test_config.addrs().router_grpc_api().client_base();
                 Some(
                     grpc_channel(test_config, client_base.as_ref())
@@ -199,7 +198,7 @@ impl Connections {
         };

         self.ingester_grpc_connection = match server_type {
-            ServerType::AllInOne | ServerType::Ingester => {
+            ServerType::AllInOne | ServerType::Ingester | ServerType::IngesterRpcWrite => {
                 let client_base = test_config.addrs().ingester_grpc_api().client_base();
                 Some(
                     grpc_channel(test_config, client_base.as_ref())
@@ -335,15 +334,12 @@ impl TestServer {
         let log_filter =
             std::env::var("LOG_FILTER").unwrap_or_else(|_| "info,sqlx=warn".to_string());

-        let run_command = server_type.run_command();
+        let run_command_name = server_type.run_command();

-        // Build the command
-        // This will inherit environment from the test runner
-        // in particular `LOG_FILTER`
-        let mut command = Command::cargo_bin("influxdb_iox").unwrap();
+        let mut command = cargo_run_command();
         let mut command = command
             .arg("run")
-            .arg(run_command)
+            .arg(run_command_name)
             .env("LOG_FILTER", log_filter)
             // add http/grpc address information
             .add_addr_env(server_type, test_config.addrs())
@@ -492,7 +488,7 @@ impl TestServer {
                     `influxdb_iox compactor run-once` instead"
                 );
             }
-            ServerType::Router => {
+            ServerType::Router | ServerType::RouterRpcWrite => {
                 if check_catalog_service_health(
                     server_type,
                     connections.router_grpc_connection(),
@@ -502,7 +498,7 @@
                     return;
                 }
             }
-            ServerType::Ingester => {
+            ServerType::Ingester | ServerType::IngesterRpcWrite => {
                 if check_arrow_service_health(
                     server_type,
                     connections.ingester_grpc_connection(),
@@ -550,6 +546,30 @@ impl TestServer {
     }
 }

+// Build the command, with the `rpc_write` feature enabled to allow testing of the RPC
+// write path.
+// This will inherit environment from the test runner, in particular, `LOG_FILTER`
+#[cfg(feature = "rpc_write")]
+fn cargo_run_command() -> std::process::Command {
+    escargot::CargoBuild::new()
+        .bin("influxdb_iox")
+        .features("rpc_write")
+        .run()
+        .unwrap()
+        .command()
+}
+
+// Build the command, WITHOUT the `rpc_write` feature enabled, to not clobber the build.
+// This will inherit environment from the test runner, in particular, `LOG_FILTER`
+#[cfg(not(feature = "rpc_write"))]
+fn cargo_run_command() -> std::process::Command {
+    escargot::CargoBuild::new()
+        .bin("influxdb_iox")
+        .run()
+        .unwrap()
+        .command()
+}
+
 /// checks catalog service health, as a proxy for all gRPC
 /// services. Returns false if the service should be checked again
 async fn check_catalog_service_health(server_type: ServerType, connection: Connection) -> bool {

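
For context, escargot drives `cargo build` programmatically and hands back a `std::process::Command` for the built artifact, which is what lets the harness above pick features per build. A hedged standalone sketch of the same calls (assumes a binary named `example-bin` exists in the workspace; error handling is left as `unwrap`):

// Hypothetical: build `example-bin` and run it with the inherited environment.
fn main() {
    let mut command = escargot::CargoBuild::new()
        .bin("example-bin") // assumed binary name
        .run()              // builds (or reuses) the binary
        .unwrap()
        .command();         // std::process::Command for the artifact

    let status = command.arg("--help").status().unwrap();
    assert!(status.success());
}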
@@ -4,7 +4,9 @@ use super::addrs::BindAddresses;
 pub enum ServerType {
     AllInOne,
     Ingester,
+    IngesterRpcWrite,
     Router,
+    RouterRpcWrite,
     Querier,
     Compactor,
 }
@@ -15,7 +17,9 @@ impl ServerType {
         match self {
             Self::AllInOne => "all-in-one",
             Self::Ingester => "ingester",
+            Self::IngesterRpcWrite => "ingester2",
             Self::Router => "router",
+            Self::RouterRpcWrite => "router-rpc-write",
             Self::Querier => "querier",
             Self::Compactor => "compactor",
         }
@@ -73,6 +77,16 @@ fn addr_envs(server_type: ServerType, addrs: &BindAddresses) -> Vec<(&'static st
                 addrs.ingester_grpc_api().bind_addr().to_string(),
             ),
         ],
+        ServerType::IngesterRpcWrite => vec![
+            (
+                "INFLUXDB_IOX_BIND_ADDR",
+                addrs.router_http_api().bind_addr().to_string(),
+            ),
+            (
+                "INFLUXDB_IOX_GRPC_BIND_ADDR",
+                addrs.ingester_grpc_api().bind_addr().to_string(),
+            ),
+        ],
         ServerType::Router => vec![
             (
                 "INFLUXDB_IOX_BIND_ADDR",
@@ -83,6 +97,20 @@ fn addr_envs(server_type: ServerType, addrs: &BindAddresses) -> Vec<(&'static st
                 addrs.router_grpc_api().bind_addr().to_string(),
             ),
         ],
+        ServerType::RouterRpcWrite => vec![
+            (
+                "INFLUXDB_IOX_BIND_ADDR",
+                addrs.router_http_api().bind_addr().to_string(),
+            ),
+            (
+                "INFLUXDB_IOX_GRPC_BIND_ADDR",
+                addrs.router_grpc_api().bind_addr().to_string(),
+            ),
+            (
+                "INFLUXDB_IOX_INGESTER_ADDRESSES",
+                addrs.ingester_grpc_api().bind_addr().to_string(),
+            ),
+        ],
         ServerType::Querier => vec![
             (
                 "INFLUXDB_IOX_BIND_ADDR",

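
The per-server-type (key, value) tuples above are ultimately applied as environment variables on the spawned server process. A minimal standalone sketch of that consumption pattern (hypothetical `apply_envs` helper and addresses, not this codebase's types):

use std::process::Command;

// Hypothetical: apply (key, value) env pairs to a child process, mirroring how
// the addr_envs output is consumed when launching a server under test.
fn apply_envs(command: &mut Command, envs: &[(&str, String)]) {
    for (key, value) in envs {
        command.env(key, value);
    }
}

fn main() {
    let envs = vec![
        ("INFLUXDB_IOX_BIND_ADDR", "127.0.0.1:8080".to_string()),
        ("INFLUXDB_IOX_GRPC_BIND_ADDR", "127.0.0.1:8082".to_string()),
    ];
    let mut command = Command::new("env"); // stand-in for the server binary
    apply_envs(&mut command, &envs);
    command.status().unwrap();
}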
@@ -25,7 +25,7 @@ bytes = { version = "1", features = ["std"] }
 chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] }
 crossbeam-utils = { version = "0.8", features = ["std"] }
 crypto-common = { version = "0.1", default-features = false, features = ["std"] }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f2eb3b2bebffe75df06f3e55f2413728e7e19f2c", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "d33457c20c2b15d6a934e5b37ac9eb0d17e29145", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] }
 digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] }
 either = { version = "1", features = ["use_std"] }
 fixedbitset = { version = "0.4", features = ["std"] }