fix: Rename data_types2 to data_types

pull/24376/head
Carol (Nichols || Goulding) 2022-05-05 15:29:24 -04:00
parent e1bef1c218
commit 068096e7e1
GPG Key ID: E907EE5A736F87D4
148 changed files with 404 additions and 449 deletions
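This commit is a mechanical rename: the crate previously called `data_types2` becomes `data_types`, so every dependent crate updates its Cargo.toml path dependency (`data_types = { path = "../data_types" }`) and its `use data_types2::...` imports, as the hunks below show. The following sketch is illustrative only and compiles on its own: the local `data_types` module and its `TimestampRange` fields are stand-ins for the real workspace crate, not its actual definitions.

// Stand-in for the renamed workspace crate so this snippet is self-contained;
// in the IOx tree these items come from the `data_types` path dependency.
mod data_types {
    #[derive(Debug)]
    pub struct TimestampRange {
        pub start: i64,
        pub end: i64,
    }
}

// Before this commit the import was `use data_types2::TimestampRange;`.
// After the rename, dependents write `use data_types::TimestampRange;`
// (`self::` appears here only because the stand-in is a local module).
use self::data_types::TimestampRange;

fn main() {
    let range = TimestampRange { start: 1, end: 100 };
    println!("resolves through the renamed crate: {:?}", range);
}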

Cargo.lock (generated): 54 changed lines

@ -812,7 +812,7 @@ dependencies = [
"async-trait",
"backoff 0.1.0",
"bytes",
"data_types2",
"data_types",
"datafusion 0.1.0",
"futures",
"iox_catalog",
@ -1149,7 +1149,7 @@ dependencies = [
]
[[package]]
name = "data_types2"
name = "data_types"
version = "0.1.0"
dependencies = [
"influxdb_line_protocol",
@ -1392,7 +1392,7 @@ name = "dml"
version = "0.1.0"
dependencies = [
"arrow_util",
"data_types2",
"data_types",
"hashbrown 0.12.0",
"iox_time",
"mutable_batch",
@ -1714,7 +1714,7 @@ name = "generated_types"
version = "0.1.0"
dependencies = [
"bytes",
"data_types2",
"data_types",
"datafusion 0.1.0",
"iox_time",
"num_cpus",
@ -2146,7 +2146,7 @@ dependencies = [
"compactor",
"console-subscriber",
"csv",
"data_types2",
"data_types",
"datafusion 0.1.0",
"dml",
"dotenv",
@ -2313,7 +2313,7 @@ dependencies = [
"bitflags",
"bytes",
"chrono",
"data_types2",
"data_types",
"datafusion 0.1.0",
"datafusion_util",
"dml",
@ -2385,7 +2385,7 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"async-trait",
"data_types2",
"data_types",
"dotenv",
"futures",
"iox_time",
@ -2411,7 +2411,7 @@ name = "iox_catalog_service"
version = "0.1.0"
dependencies = [
"async-trait",
"data_types2",
"data_types",
"generated_types",
"iox_catalog",
"iox_time",
@ -2489,7 +2489,7 @@ version = "0.1.0"
dependencies = [
"async-trait",
"bytes",
"data_types2",
"data_types",
"futures",
"generated_types",
"iox_catalog",
@ -2514,7 +2514,7 @@ version = "0.1.0"
dependencies = [
"arrow",
"bytes",
"data_types2",
"data_types",
"datafusion 0.1.0",
"iox_catalog",
"iox_time",
@ -2546,7 +2546,7 @@ dependencies = [
"chrono",
"clap 3.1.12",
"clap_blocks",
"data_types2",
"data_types",
"dml",
"flate2",
"futures",
@ -2586,7 +2586,7 @@ dependencies = [
"async-trait",
"clap_blocks",
"compactor",
"data_types2",
"data_types",
"generated_types",
"hyper",
"iox_catalog",
@ -2614,7 +2614,7 @@ version = "0.1.0"
dependencies = [
"async-trait",
"clap_blocks",
"data_types2",
"data_types",
"generated_types",
"hyper",
"ingester",
@ -2644,7 +2644,7 @@ version = "0.1.0"
dependencies = [
"arrow-flight",
"async-trait",
"data_types2",
"data_types",
"generated_types",
"hyper",
"iox_catalog",
@ -2677,7 +2677,7 @@ dependencies = [
"arrow-flight",
"async-trait",
"clap_blocks",
"data_types2",
"data_types",
"generated_types",
"hashbrown 0.12.0",
"hyper",
@ -3278,7 +3278,7 @@ dependencies = [
"arrow",
"arrow_util",
"chrono",
"data_types2",
"data_types",
"hashbrown 0.12.0",
"iox_time",
"itertools",
@ -3774,7 +3774,7 @@ dependencies = [
"arrow_util",
"base64 0.13.0",
"bytes",
"data_types2",
"data_types",
"datafusion 0.1.0",
"datafusion_util",
"futures",
@ -4032,7 +4032,7 @@ version = "0.1.0"
dependencies = [
"arrow",
"chrono",
"data_types2",
"data_types",
"datafusion 0.1.0",
"datafusion_util",
"itertools",
@ -4240,7 +4240,7 @@ dependencies = [
"bytes",
"client_util",
"criterion",
"data_types2",
"data_types",
"datafusion 0.1.0",
"datafusion_util",
"futures",
@ -4284,7 +4284,7 @@ dependencies = [
"async-trait",
"chrono",
"croaring",
"data_types2",
"data_types",
"datafusion 0.1.0",
"datafusion_util",
"executor",
@ -4334,7 +4334,7 @@ dependencies = [
"arrow_util",
"async-trait",
"backoff 0.1.0",
"data_types2",
"data_types",
"datafusion 0.1.0",
"dml",
"futures",
@ -4492,7 +4492,7 @@ dependencies = [
"arrow_util",
"criterion",
"croaring",
"data_types2",
"data_types",
"datafusion 0.1.0",
"either",
"hashbrown 0.12.0",
@ -4637,7 +4637,7 @@ dependencies = [
"async-trait",
"bytes",
"criterion",
"data_types2",
"data_types",
"dml",
"flate2",
"futures",
@ -5167,7 +5167,7 @@ dependencies = [
"arrow",
"arrow-flight",
"bytes",
"data_types2",
"data_types",
"datafusion 0.1.0",
"futures",
"generated_types",
@ -5190,7 +5190,7 @@ version = "0.1.0"
dependencies = [
"arrow",
"async-trait",
"data_types2",
"data_types",
"datafusion 0.1.0",
"futures",
"generated_types",
@ -6759,7 +6759,7 @@ name = "write_buffer"
version = "0.1.0"
dependencies = [
"async-trait",
"data_types2",
"data_types",
"dml",
"dotenv",
"futures",
@ -6793,7 +6793,7 @@ name = "write_summary"
version = "0.1.0"
dependencies = [
"base64 0.13.0",
"data_types2",
"data_types",
"dml",
"generated_types",
"iox_time",


@ -6,7 +6,7 @@ members = [
"clap_blocks",
"client_util",
"compactor",
"data_types2",
"data_types",
"datafusion",
"datafusion_util",
"dml",


@ -9,7 +9,7 @@ arrow = { version = "13", features = ["prettyprint"] }
async-trait = "0.1.53"
backoff = { path = "../backoff" }
bytes = "1.0"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
futures = "0.3"
iox_catalog = { path = "../iox_catalog" }


@ -1,15 +1,17 @@
//! Data Points for the lifecycle of the Compactor
use crate::handler::CompactorConfig;
use crate::utils::GroupWithMinTimeAndSize;
use crate::{
handler::CompactorConfig,
query::QueryableParquetChunk,
utils::{CatalogUpdate, CompactedData, GroupWithTombstones, ParquetFileWithTombstone},
utils::{
CatalogUpdate, CompactedData, GroupWithMinTimeAndSize, GroupWithTombstones,
ParquetFileWithTombstone,
},
};
use arrow::record_batch::RecordBatch;
use backoff::{Backoff, BackoffConfig};
use bytes::Bytes;
use data_types2::{
use data_types::{
ParquetFile, ParquetFileId, ParquetFileWithMetadata, PartitionId, SequencerId, TableId,
TablePartition, Timestamp, Tombstone, TombstoneId,
};
@ -23,10 +25,10 @@ use parquet_file::{
metadata::{IoxMetadata, IoxParquetMetaData},
ParquetFilePath,
};
use query::provider::overlap::group_potential_duplicates;
use query::{
exec::{Executor, ExecutorType},
frontend::reorg::ReorgPlanner,
provider::overlap::group_potential_duplicates,
util::compute_timenanosecond_min_max,
QueryChunk,
};
@ -1085,7 +1087,7 @@ pub struct PartitionCompactionCandidate {
mod tests {
use super::*;
use arrow_util::assert_batches_sorted_eq;
use data_types2::{ChunkId, KafkaPartition, NamespaceId, ParquetFileParams, SequenceNumber};
use data_types::{ChunkId, KafkaPartition, NamespaceId, ParquetFileParams, SequenceNumber};
use iox_catalog::interface::INITIAL_COMPACTION_LEVEL;
use iox_tests::util::TestCatalog;
use iox_time::SystemProvider;


@ -2,7 +2,7 @@
//! no longer needed because they've been compacted and they're old enough to no longer be used by
//! any queriers.
use data_types2::Timestamp;
use data_types::Timestamp;
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use object_store::DynObjectStore;
@ -93,7 +93,7 @@ impl GarbageCollector {
#[cfg(test)]
mod tests {
use super::*;
use data_types2::{KafkaPartition, ParquetFile, ParquetFileParams, SequenceNumber};
use data_types::{KafkaPartition, ParquetFile, ParquetFileParams, SequenceNumber};
use iox_catalog::interface::INITIAL_COMPACTION_LEVEL;
use iox_tests::util::TestCatalog;
use object_store::ObjectStoreTestConvenience;


@ -2,7 +2,7 @@
use async_trait::async_trait;
use backoff::{Backoff, BackoffConfig};
use data_types2::SequencerId;
use data_types::SequencerId;
use futures::{
future::{BoxFuture, Shared},
select, FutureExt, TryFutureExt,


@ -1,6 +1,6 @@
//! Queryable Compactor Data
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, DeletePredicate, PartitionId, SequenceNumber, TableSummary,
Timestamp, TimestampMinMax, Tombstone,
};


@ -2,7 +2,7 @@
use crate::query::QueryableParquetChunk;
use arrow::record_batch::RecordBatch;
use data_types2::{
use data_types::{
ParquetFileId, ParquetFileParams, ParquetFileWithMetadata, Timestamp, Tombstone, TombstoneId,
};
use object_store::DynObjectStore;


@ -1,5 +1,5 @@
[package]
name = "data_types2"
name = "data_types"
version = "0.1.0"
edition = "2021"
description = "Shared data types in the IOx NG architecture"


@ -1313,7 +1313,7 @@ pub enum DatabaseNameError {
/// that is expecting a `str`:
///
/// ```rust
/// # use data_types2::DatabaseName;
/// # use data_types::DatabaseName;
/// fn print_database(s: &str) {
/// println!("database name: {}", s);
/// }


@ -6,7 +6,7 @@ description = "DML types"
[dependencies]
arrow_util = { path = "../arrow_util" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
hashbrown = "0.12"
mutable_batch = { path = "../mutable_batch" }
ordered-float = "3"


@ -11,7 +11,7 @@
clippy::clone_on_ref_ptr
)]
use data_types2::{DeletePredicate, NonEmptyString, Sequence, StatValues, Statistics};
use data_types::{DeletePredicate, NonEmptyString, Sequence, StatValues, Statistics};
use hashbrown::HashMap;
use iox_time::Time;
use mutable_batch::MutableBatch;


@ -6,7 +6,7 @@ edition = "2021"
[dependencies] # In alphabetical order
bytes = "1.0"
data_types2 = { path = "../data_types2", optional = true }
data_types = { path = "../data_types", optional = true }
datafusion = { path = "../datafusion", optional = true }
observability_deps = { path = "../observability_deps" }
pbjson = "0.3"
@ -30,4 +30,4 @@ pbjson-build = "0.3"
[features]
default = ["data_types_conversions"]
data_types_conversions = ["data_types2", "datafusion", "predicate"]
data_types_conversions = ["data_types", "datafusion", "predicate"]


@ -1,8 +1,9 @@
//! Code to serialize and deserialize certain expressions.
//!
//! Note that [Ballista] also provides a serialization using [Protocol Buffers 3]. However the protocol is meant as a
//! communication channel between workers and clients of Ballista, not for long term preservation. For IOx we need a
//! more stable solution. Luckily we only need to support a very small subset of expression.
//! Note that [Ballista] also provides a serialization using [Protocol Buffers 3]. However the
//! protocol is meant as a communication channel between workers and clients of Ballista, not for
//! long term preservation. For IOx we need a more stable solution. Luckily we only need to support
//! a very small subset of expression.
//!
//! [Ballista]: https://github.com/apache/arrow-datafusion/blob/22fcb3d7a68a56afbe12eab9e7d98f7b8de33703/ballista/rust/core/proto/ballista.proto
//! [Protocol Buffers 3]: https://developers.google.com/protocol-buffers/docs/proto3
@ -11,7 +12,7 @@ use crate::google::{FieldViolation, FromOptionalField, FromRepeatedField, Option
use crate::influxdata::iox::predicate::v1 as proto;
use crate::influxdata::iox::predicate::v1::scalar::Value;
use crate::influxdata::iox::predicate::v1::{Expr, Predicate};
use data_types2::{DeleteExpr, DeletePredicate, Op, Scalar, TimestampRange};
use data_types::{DeleteExpr, DeletePredicate, Op, Scalar, TimestampRange};
impl From<DeletePredicate> for proto::Predicate {
fn from(predicate: DeletePredicate) -> Self {


@ -1,5 +1,5 @@
use crate::{google::FieldViolation, influxdata::iox::ingester::v1 as proto};
use data_types2::TimestampRange;
use data_types::TimestampRange;
use datafusion::{
common::DataFusionError, datafusion_proto::bytes::Serializeable, logical_plan::Expr,
};


@ -9,7 +9,7 @@ default-run = "influxdb_iox"
# Workspace dependencies, in alphabetical order
clap_blocks = { path = "../clap_blocks" }
compactor = { path = "../compactor" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
dml = { path = "../dml" }
generated_types = { path = "../generated_types" }


@ -2,7 +2,7 @@
use bytes::Bytes;
use clap_blocks::{catalog_dsn::CatalogDsnConfig, object_store::ObjectStoreConfig};
use data_types2::{
use data_types::{
ColumnType, KafkaPartition, NamespaceId, NamespaceSchema as CatalogNamespaceSchema,
ParquetFile as CatalogParquetFile, ParquetFileParams, PartitionId, SequenceNumber, SequencerId,
TableId, Timestamp,
@ -370,7 +370,7 @@ struct PartitionMapping {
#[cfg(test)]
mod tests {
use super::*;
use data_types2::{ColumnType, ParquetFileId};
use data_types::{ColumnType, ParquetFileId};
use influxdb_iox_client::schema::generated_types::*;
use iox_catalog::mem::MemCatalog;
use std::collections::HashMap;


@ -14,7 +14,7 @@ base64 = "0.13"
bytes = "1.0"
datafusion = { path = "../datafusion" }
datafusion_util = { path = "../datafusion_util" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
futures = "0.3"
generated_types = { path = "../generated_types" }
chrono = { version = "0.4", default-features = false }


@ -2,7 +2,7 @@
use crate::data::{PersistingBatch, QueryableBatch};
use arrow::record_batch::RecordBatch;
use data_types2::{NamespaceId, PartitionInfo};
use data_types::{NamespaceId, PartitionInfo};
use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream};
use iox_catalog::interface::INITIAL_COMPACTION_LEVEL;
use iox_time::{Time, TimeProvider};
@ -178,7 +178,7 @@ mod tests {
make_persisting_batch, make_queryable_batch, make_queryable_batch_with_deletes,
};
use arrow_util::assert_batches_eq;
use data_types2::{Partition, PartitionId, SequencerId, TableId};
use data_types::{Partition, PartitionId, SequencerId, TableId};
use iox_time::SystemProvider;
use mutable_batch_lp::lines_to_batches;
use schema::selection::Selection;


@ -10,7 +10,7 @@ use crate::{
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use backoff::{Backoff, BackoffConfig};
use data_types2::{
use data_types::{
DeletePredicate, KafkaPartition, NamespaceId, PartitionId, PartitionInfo, SequenceNumber,
SequencerId, TableId, Timestamp, Tombstone,
};
@ -1519,7 +1519,7 @@ mod tests {
};
use arrow_util::assert_batches_sorted_eq;
use assert_matches::assert_matches;
use data_types2::{
use data_types::{
NamespaceSchema, NonEmptyString, ParquetFileParams, Sequence, TimestampRange,
};
use dml::{DmlDelete, DmlMeta, DmlWrite};


@ -13,7 +13,7 @@ use crate::{
};
use async_trait::async_trait;
use backoff::BackoffConfig;
use data_types2::{KafkaPartition, KafkaTopic, Sequencer};
use data_types::{KafkaPartition, KafkaTopic, Sequencer};
use futures::{
future::{BoxFuture, Shared},
stream::FuturesUnordered,
@ -305,7 +305,7 @@ impl Drop for IngestHandlerImpl {
#[cfg(test)]
mod tests {
use super::*;
use data_types2::{Namespace, NamespaceSchema, QueryPool, Sequence, SequenceNumber};
use data_types::{Namespace, NamespaceSchema, QueryPool, Sequence, SequenceNumber};
use dml::{DmlMeta, DmlWrite};
use iox_catalog::{mem::MemCatalog, validate_or_insert_schema};
use iox_time::Time;


@ -1,4 +1,4 @@
use data_types2::PartitionId;
use data_types::PartitionId;
use iox_time::TimeProvider;
use parking_lot::Mutex;
use std::sync::Arc;


@ -10,7 +10,7 @@ use crate::{
job::{Job, JobRegistry},
poison::{PoisonCabinet, PoisonPill},
};
use data_types2::{PartitionId, SequenceNumber, SequencerId};
use data_types::{PartitionId, SequenceNumber, SequencerId};
use iox_time::{Time, TimeProvider};
use metric::{Metric, U64Counter};
use observability_deps::tracing::{error, info};


@ -81,7 +81,7 @@ pub async fn persist(
#[cfg(test)]
mod tests {
use super::*;
use data_types2::{NamespaceId, PartitionId, SequenceNumber, SequencerId, TableId};
use data_types::{NamespaceId, PartitionId, SequenceNumber, SequencerId, TableId};
use iox_catalog::interface::INITIAL_COMPACTION_LEVEL;
use iox_time::Time;
use object_store::{ObjectStoreImpl, ObjectStoreTestConvenience};


@ -1,4 +1,4 @@
use data_types2::KafkaPartition;
use data_types::KafkaPartition;
use futures::Future;
use parking_lot::{RwLock, RwLockUpgradableReadGuard};
use pin_project::pin_project;


@ -322,7 +322,7 @@ mod tests {
};
use arrow_util::{assert_batches_eq, assert_batches_sorted_eq};
use assert_matches::assert_matches;
use data_types2::PartitionId;
use data_types::PartitionId;
use datafusion::logical_plan::{col, lit};
use predicate::PredicateBuilder;


@ -3,7 +3,7 @@
use crate::data::{QueryableBatch, SnapshotBatch};
use arrow::record_batch::RecordBatch;
use arrow_util::util::merge_record_batches;
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, DeletePredicate, PartitionId, SequenceNumber, TableSummary,
TimestampMinMax, Tombstone,
};
@ -313,7 +313,7 @@ mod tests {
datatypes::{DataType, Int32Type, TimeUnit},
};
use arrow_util::assert_batches_eq;
use data_types2::{DeleteExpr, Op, Scalar, TimestampRange};
use data_types::{DeleteExpr, Op, Scalar, TimestampRange};
use datafusion::logical_plan::{col, lit};
use predicate::PredicateBuilder;


@ -1,18 +1,15 @@
use std::{fmt::Debug, time::Duration};
use data_types2::KafkaPartition;
use super::DmlSink;
use crate::lifecycle::{LifecycleHandle, LifecycleHandleImpl};
use data_types::KafkaPartition;
use dml::DmlOperation;
use futures::{pin_mut, FutureExt, Stream, StreamExt};
use iox_time::{SystemProvider, TimeProvider};
use metric::{Attributes, U64Counter, U64Gauge};
use observability_deps::tracing::*;
use std::{fmt::Debug, time::Duration};
use tokio_util::sync::CancellationToken;
use write_buffer::core::{WriteBufferError, WriteBufferErrorKind};
use crate::lifecycle::{LifecycleHandle, LifecycleHandleImpl};
use super::DmlSink;
/// When the [`LifecycleManager`] indicates that ingest should be paused because
/// of memory pressure, the sequencer will loop, sleeping this long between
/// calls to [`LifecycleHandle::can_resume_ingest()`] with the manager if it
@ -378,7 +375,7 @@ mod tests {
stream_handler::mock_sink::MockDmlSink,
};
use assert_matches::assert_matches;
use data_types2::{DeletePredicate, Sequence, TimestampRange};
use data_types::{DeletePredicate, Sequence, TimestampRange};
use dml::{DmlDelete, DmlMeta, DmlWrite};
use futures::stream;
use iox_time::{SystemProvider, Time};


@ -1,3 +1,7 @@
use super::sink_instrumentation::WatermarkFetcher;
use data_types::KafkaPartition;
use metric::U64Counter;
use observability_deps::tracing::*;
use std::{
sync::{
atomic::{AtomicU64, Ordering},
@ -5,15 +9,9 @@ use std::{
},
time::{Duration, Instant},
};
use data_types2::KafkaPartition;
use metric::U64Counter;
use observability_deps::tracing::*;
use tokio::task::JoinHandle;
use write_buffer::core::WriteBufferReading;
use super::sink_instrumentation::WatermarkFetcher;
/// Periodically fetch and cache the maximum known write buffer offset
/// (watermark) from the write buffer for a given sequencer.
///
@ -187,7 +185,7 @@ impl WatermarkFetcher for PeriodicWatermarkFetcher {
#[cfg(test)]
mod tests {
use data_types2::Sequence;
use data_types::Sequence;
use metric::{Attributes, Metric};
use test_helpers::timeout::FutureTimeout;
use write_buffer::mock::{


@ -1,14 +1,11 @@
//! Compatibility layer providing a [`DmlSink`] impl for [`IngesterData`].
use std::sync::Arc;
use async_trait::async_trait;
use data_types2::SequencerId;
use dml::DmlOperation;
use crate::{data::IngesterData, lifecycle::LifecycleHandleImpl};
use super::DmlSink;
use crate::{data::IngesterData, lifecycle::LifecycleHandleImpl};
use async_trait::async_trait;
use data_types::SequencerId;
use dml::DmlOperation;
use std::sync::Arc;
/// Provides a [`DmlSink`] implementation for a [`IngesterData`] instance.
#[derive(Debug)]


@ -1,15 +1,14 @@
//! Instrumentation for [`DmlSink`] implementations.
use std::fmt::Debug;
use super::DmlSink;
use async_trait::async_trait;
use data_types2::KafkaPartition;
use data_types::KafkaPartition;
use dml::DmlOperation;
use iox_time::{SystemProvider, TimeProvider};
use metric::{Attributes, U64Counter, U64Gauge, U64Histogram, U64HistogramOptions};
use std::fmt::Debug;
use trace::span::SpanRecorder;
use super::DmlSink;
/// A [`WatermarkFetcher`] abstracts a source of the write buffer high watermark
/// (max known offset).
///
@ -255,7 +254,7 @@ mod tests {
use std::sync::Arc;
use assert_matches::assert_matches;
use data_types2::Sequence;
use data_types::Sequence;
use dml::{DmlMeta, DmlWrite};
use iox_time::Time;
use metric::{Metric, MetricObserver, Observation};


@ -1,4 +1,5 @@
//! Test setups and data for ingester crate
#![allow(missing_docs)]
use crate::{
@ -11,7 +12,7 @@ use crate::{
use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_eq;
use bitflags::bitflags;
use data_types2::{
use data_types::{
KafkaPartition, NamespaceId, PartitionId, SequenceNumber, SequencerId, TableId, Timestamp,
Tombstone, TombstoneId,
};


@ -7,15 +7,15 @@ edition = "2021"
[dependencies] # In alphabetical order
assert_matches = "1.5.0"
async-trait = "0.1.53"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
futures = "0.3"
iox_time = { version = "0.1.0", path = "../iox_time" }
metric = { version = "0.1.0", path = "../metric" }
mutable_batch = { path = "../mutable_batch" }
observability_deps = { path = "../observability_deps" }
snafu = "0.7"
sqlx = { version = "0.5", features = [ "runtime-tokio-rustls" , "postgres", "uuid" ] }
sqlx-hotswap-pool = { path = "../sqlx-hotswap-pool" }
iox_time = { version = "0.1.0", path = "../iox_time" }
tokio = { version = "1.18", features = ["io-util", "macros", "parking_lot", "rt-multi-thread", "time"] }
uuid = { version = "0.8", features = ["v4"] }
workspace-hack = { path = "../workspace-hack"}
@ -28,5 +28,3 @@ pretty_assertions = "1.2.1"
rand = "0.8"
tempfile = "3"
test_helpers = { path = "../test_helpers" }
[features]


@ -1,7 +1,7 @@
//! This module contains the traits and data objects for the Catalog API.
use async_trait::async_trait;
use data_types2::{
use data_types::{
Column, ColumnSchema, ColumnType, KafkaPartition, KafkaTopic, KafkaTopicId, Namespace,
NamespaceId, NamespaceSchema, ParquetFile, ParquetFileId, ParquetFileParams,
ParquetFileWithMetadata, Partition, PartitionId, PartitionInfo, ProcessedTombstone, QueryPool,
@ -773,7 +773,7 @@ pub(crate) mod test_helpers {
use super::*;
use ::test_helpers::{assert_contains, tracing::TracingCapture};
use data_types2::ColumnId;
use data_types::ColumnId;
use metric::{Attributes, Metric, U64Histogram};
use std::{
ops::{Add, DerefMut},


@ -11,12 +11,11 @@
clippy::clone_on_ref_ptr
)]
use crate::interface::{Error, Result, Transaction};
use data_types2::{
use crate::interface::{ColumnUpsertRequest, Error, RepoCollection, Result, Transaction};
use data_types::{
ColumnType, KafkaPartition, KafkaTopic, NamespaceSchema, QueryPool, Sequencer, SequencerId,
TableSchema,
};
use interface::{ColumnUpsertRequest, RepoCollection};
use mutable_batch::MutableBatch;
use std::{borrow::Cow, collections::BTreeMap};


@ -11,7 +11,7 @@ use crate::{
metrics::MetricDecorator,
};
use async_trait::async_trait;
use data_types2::{
use data_types::{
Column, ColumnId, ColumnType, KafkaPartition, KafkaTopic, KafkaTopicId, Namespace, NamespaceId,
ParquetFile, ParquetFileId, ParquetFileParams, ParquetFileWithMetadata, Partition, PartitionId,
PartitionInfo, ProcessedTombstone, QueryPool, QueryPoolId, SequenceNumber, Sequencer,


@ -6,7 +6,7 @@ use crate::interface::{
SequencerRepo, TablePersistInfo, TableRepo, TombstoneRepo,
};
use async_trait::async_trait;
use data_types2::{
use data_types::{
Column, ColumnType, KafkaPartition, KafkaTopic, KafkaTopicId, Namespace, NamespaceId,
ParquetFile, ParquetFileId, ParquetFileParams, ParquetFileWithMetadata, Partition, PartitionId,
PartitionInfo, ProcessedTombstone, QueryPool, QueryPoolId, SequenceNumber, Sequencer,


@ -10,7 +10,7 @@ use crate::{
metrics::MetricDecorator,
};
use async_trait::async_trait;
use data_types2::{
use data_types::{
Column, ColumnType, KafkaPartition, KafkaTopic, KafkaTopicId, Namespace, NamespaceId,
ParquetFile, ParquetFileId, ParquetFileParams, ParquetFileWithMetadata, Partition, PartitionId,
PartitionInfo, ProcessedTombstone, QueryPool, QueryPoolId, SequenceNumber, Sequencer,
@ -18,8 +18,9 @@ use data_types2::{
};
use iox_time::{SystemProvider, TimeProvider};
use observability_deps::tracing::{info, warn};
use sqlx::types::Uuid;
use sqlx::{migrate::Migrator, postgres::PgPoolOptions, Acquire, Executor, Postgres, Row};
use sqlx::{
migrate::Migrator, postgres::PgPoolOptions, types::Uuid, Acquire, Executor, Postgres, Row,
};
use sqlx_hotswap_pool::HotSwapPool;
use std::{sync::Arc, time::Duration};


@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
async-trait = "0.1"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
generated_types = { path = "../generated_types" }
iox_catalog = { path = "../iox_catalog" }
observability_deps = { path = "../observability_deps" }


@ -11,7 +11,7 @@
clippy::clone_on_ref_ptr
)]
use data_types2::{PartitionId, TableId};
use data_types::{PartitionId, TableId};
use generated_types::influxdata::iox::catalog::v1::*;
use iox_catalog::interface::Catalog;
use observability_deps::tracing::*;
@ -81,7 +81,7 @@ impl catalog_service_server::CatalogService for CatalogService {
}
// converts the catalog ParquetFile to protobuf
fn to_parquet_file(p: data_types2::ParquetFile) -> ParquetFile {
fn to_parquet_file(p: data_types::ParquetFile) -> ParquetFile {
ParquetFile {
id: p.id.get(),
sequencer_id: p.sequencer_id.get(),
@ -102,7 +102,7 @@ fn to_parquet_file(p: data_types2::ParquetFile) -> ParquetFile {
}
// converts the catalog Partition to protobuf
fn to_partition(p: data_types2::Partition) -> Partition {
fn to_partition(p: data_types::Partition) -> Partition {
Partition {
id: p.id.get(),
sequencer_id: p.sequencer_id.get(),
@ -115,7 +115,7 @@ fn to_partition(p: data_types2::Partition) -> Partition {
#[cfg(test)]
mod tests {
use super::*;
use data_types2::{KafkaPartition, ParquetFileParams, SequenceNumber, Timestamp};
use data_types::{KafkaPartition, ParquetFileParams, SequenceNumber, Timestamp};
use generated_types::influxdata::iox::catalog::v1::catalog_service_server::CatalogService;
use iox_catalog::mem::MemCatalog;
use uuid::Uuid;


@ -3,20 +3,18 @@ name = "iox_object_store_service"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-trait = "0.1"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
futures = "0.3"
generated_types = { path = "../generated_types" }
iox_catalog = { path = "../iox_catalog" }
iox_time = { path = "../iox_time" }
object_store = { path = "../object_store" }
observability_deps = { path = "../observability_deps" }
parquet_file = { path = "../parquet_file" }
serde = "1.0"
serde_urlencoded = "0.7"
iox_time = { path = "../iox_time" }
tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
tokio-stream = "0.1"
tonic = "0.7"


@ -12,8 +12,7 @@
clippy::clone_on_ref_ptr
)]
use futures::stream::BoxStream;
use futures::StreamExt;
use futures::{stream::BoxStream, StreamExt};
use generated_types::influxdata::iox::object_store::v1::*;
use iox_catalog::interface::Catalog;
use object_store::DynObjectStore;
@ -96,7 +95,7 @@ impl object_store_service_server::ObjectStoreService for ObjectStoreService {
mod tests {
use super::*;
use bytes::Bytes;
use data_types2::{KafkaPartition, ParquetFileParams, SequenceNumber, Timestamp};
use data_types::{KafkaPartition, ParquetFileParams, SequenceNumber, Timestamp};
use generated_types::influxdata::iox::object_store::v1::object_store_service_server::ObjectStoreService;
use iox_catalog::mem::MemCatalog;
use object_store::{ObjectStoreApi, ObjectStoreImpl};


@ -8,15 +8,15 @@ description = "IOx NG test utils and tests"
[dependencies]
arrow = "13"
bytes = "1.0"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
iox_catalog = { path = "../iox_catalog" }
iox_time = { path = "../iox_time" }
metric = { path = "../metric" }
mutable_batch_lp = { path = "../mutable_batch_lp" }
object_store = { path = "../object_store" }
parquet_file = { path = "../parquet_file" }
query = { path = "../query" }
schema = { path = "../schema" }
iox_time = { path = "../iox_time" }
uuid = { version = "0.8", features = ["v4"] }
workspace-hack = { path = "../workspace-hack"}


@ -5,7 +5,7 @@ use arrow::{
record_batch::RecordBatch,
};
use bytes::Bytes;
use data_types2::{
use data_types::{
Column, ColumnType, KafkaPartition, KafkaTopic, Namespace, ParquetFile, ParquetFileId,
ParquetFileParams, ParquetFileWithMetadata, Partition, PartitionId, QueryPool, SequenceNumber,
Sequencer, SequencerId, Table, TableId, Timestamp, Tombstone, TombstoneId,


@ -3,14 +3,12 @@ name = "ioxd_common"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
# Optional feature 'pprof' enables http://localhost:8080/debug/pprof/profile support support
[dependencies]
# Workspace dependencies, in alphabetical order
clap_blocks = { path = "../clap_blocks" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
dml = { path = "../dml" }
metric = { path = "../metric" }
observability_deps = { path = "../observability_deps" }


@ -1,10 +1,11 @@
use std::sync::Arc;
use super::{
error::{HttpApiError, HttpApiErrorExt, HttpApiErrorSource},
metrics::LineProtocolMetrics,
};
use crate::{http::utils::parse_body, server_type::ServerType};
use async_trait::async_trait;
use chrono::Utc;
use data_types2::{
org_and_bucket_to_database, DatabaseName, NonEmptyString, OrgBucketMappingError,
};
use data_types::{org_and_bucket_to_database, DatabaseName, NonEmptyString, OrgBucketMappingError};
use dml::{DmlDelete, DmlMeta, DmlOperation, DmlWrite};
use hyper::{Body, Method, Request, Response, StatusCode};
use mutable_batch_lp::LinesConverter;
@ -12,13 +13,7 @@ use observability_deps::tracing::debug;
use predicate::delete_predicate::{parse_delete_predicate, parse_http_delete_request};
use serde::Deserialize;
use snafu::{OptionExt, ResultExt, Snafu};
use crate::{http::utils::parse_body, server_type::ServerType};
use super::{
error::{HttpApiError, HttpApiErrorExt, HttpApiErrorSource},
metrics::LineProtocolMetrics,
};
use std::sync::Arc;
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Snafu)]


@ -9,7 +9,7 @@ edition = "2021"
# Workspace dependencies, in alphabetical order
clap_blocks = { path = "../clap_blocks" }
compactor = { path = "../compactor" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
generated_types = { path = "../generated_types" }
iox_catalog = { path = "../iox_catalog" }
ioxd_common = { path = "../ioxd_common" }


@ -1,23 +1,13 @@
use std::{
fmt::{Debug, Display},
sync::Arc,
};
use async_trait::async_trait;
use clap_blocks::compactor::CompactorConfig;
use compactor::{
handler::{CompactorHandler, CompactorHandlerImpl},
server::CompactorServer,
};
use data_types2::KafkaPartition;
use data_types::KafkaPartition;
use hyper::{Body, Request, Response};
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use metric::Registry;
use object_store::DynObjectStore;
use query::exec::Executor;
use trace::TraceCollector;
use clap_blocks::compactor::CompactorConfig;
use ioxd_common::{
add_service,
http::error::{HttpApiError, HttpApiErrorCode, HttpApiErrorSource},
@ -26,7 +16,15 @@ use ioxd_common::{
server_type::{CommonServerState, RpcError, ServerType},
setup_builder,
};
use metric::Registry;
use object_store::DynObjectStore;
use query::exec::Executor;
use std::{
fmt::{Debug, Display},
sync::Arc,
};
use thiserror::Error;
use trace::TraceCollector;
#[derive(Debug, Error)]
pub enum Error {


@ -3,21 +3,19 @@ name = "ioxd_ingester"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Workspace dependencies, in alphabetical order
clap_blocks = { path = "../clap_blocks" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
generated_types = { path = "../generated_types" }
ingester = { path = "../ingester" }
iox_catalog = { path = "../iox_catalog" }
iox_time = { path = "../iox_time" }
ioxd_common = { path = "../ioxd_common" }
metric = { path = "../metric" }
object_store = { path = "../object_store" }
query = { path = "../query" }
service_grpc_testing = { path = "../service_grpc_testing" }
iox_time = { path = "../iox_time" }
trace = { path = "../trace" }
trace_http = { path = "../trace_http" }
write_buffer = { path = "../write_buffer" }


@ -1,26 +1,13 @@
use std::{
collections::BTreeMap,
fmt::{Debug, Display},
sync::Arc,
time::Duration,
};
use async_trait::async_trait;
use clap_blocks::{ingester::IngesterConfig, write_buffer::WriteBufferConfig};
use data_types2::KafkaPartition;
use data_types::KafkaPartition;
use hyper::{Body, Request, Response};
use ingester::{
handler::IngestHandlerImpl,
handler::{IngestHandler, IngestHandlerImpl},
lifecycle::LifecycleConfig,
server::{grpc::GrpcDelegate, http::HttpDelegate, IngesterServer},
};
use iox_catalog::interface::Catalog;
use metric::Registry;
use object_store::DynObjectStore;
use query::exec::Executor;
use trace::TraceCollector;
use ingester::handler::IngestHandler;
use ioxd_common::{
add_service,
http::error::{HttpApiError, HttpApiErrorCode, HttpApiErrorSource},
@ -29,7 +16,17 @@ use ioxd_common::{
server_type::{CommonServerState, RpcError, ServerType},
setup_builder,
};
use metric::Registry;
use object_store::DynObjectStore;
use query::exec::Executor;
use std::{
collections::BTreeMap,
fmt::{Debug, Display},
sync::Arc,
time::Duration,
};
use thiserror::Error;
use trace::TraceCollector;
#[derive(Debug, Error)]
pub enum Error {


@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
# Workspace dependencies, in alphabetical order
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
generated_types = { path = "../generated_types" }
iox_catalog = { path = "../iox_catalog" }
ioxd_common = { path = "../ioxd_common" }


@ -1,9 +1,9 @@
//! NamespaceService gRPC implementation
use std::sync::Arc;
use data_types2::Namespace;
use data_types::Namespace;
use generated_types::influxdata::iox::namespace::v1 as proto;
use querier::QuerierDatabase;
use std::sync::Arc;
/// Acquire a [`NamespaceService`] gRPC service implementation.
pub fn namespace_service(


@ -3,14 +3,13 @@ name = "ioxd_router2"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Workspace dependencies, in alphabetical order
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
clap_blocks = { path = "../clap_blocks" }
generated_types = { path = "../generated_types" }
iox_catalog = { path = "../iox_catalog" }
iox_time = { path = "../iox_time" }
ioxd_common = { path = "../ioxd_common" }
metric = { path = "../metric" }
mutable_batch = { path = "../mutable_batch" }
@ -18,7 +17,6 @@ object_store = { path = "../object_store" }
observability_deps = { path = "../observability_deps" }
router2 = { path = "../router2" }
service_grpc_testing = { path = "../service_grpc_testing" }
iox_time = { path = "../iox_time" }
trace = { path = "../trace" }
trace_http = { path = "../trace_http" }
write_buffer = { path = "../write_buffer" }


@ -1,17 +1,20 @@
use std::{
collections::BTreeSet,
fmt::{Debug, Display},
sync::Arc,
};
use async_trait::async_trait;
use clap_blocks::write_buffer::WriteBufferConfig;
use data_types2::{DatabaseName, PartitionTemplate, TemplatePart};
use data_types::{DatabaseName, PartitionTemplate, TemplatePart};
use hashbrown::HashMap;
use hyper::{Body, Request, Response};
use iox_catalog::interface::Catalog;
use ioxd_common::{
add_service,
http::error::{HttpApiError, HttpApiErrorSource},
rpc::RpcBuilderInput,
serve_builder,
server_type::{CommonServerState, RpcError, ServerType},
setup_builder,
};
use metric::Registry;
use mutable_batch::MutableBatch;
use object_store::DynObjectStore;
use observability_deps::tracing::info;
use router2::{
dml_handlers::{
@ -26,21 +29,16 @@ use router2::{
server::{grpc::GrpcDelegate, http::HttpDelegate, RouterServer},
sharder::JumpHash,
};
use std::{
collections::BTreeSet,
fmt::{Debug, Display},
sync::Arc,
};
use thiserror::Error;
use tokio_util::sync::CancellationToken;
use trace::TraceCollector;
use write_summary::WriteSummary;
use ioxd_common::{
add_service,
http::error::{HttpApiError, HttpApiErrorSource},
rpc::RpcBuilderInput,
serve_builder,
server_type::{CommonServerState, RpcError, ServerType},
setup_builder,
};
use object_store::DynObjectStore;
#[derive(Debug, Error)]
pub enum Error {
#[error("failed to initialise write buffer connection: {0}")]
@ -352,7 +350,7 @@ where
#[cfg(test)]
mod tests {
use data_types2::ColumnType;
use data_types::ColumnType;
use iox_catalog::mem::MemCatalog;
use super::*;


@ -8,7 +8,7 @@ description = "A mutable arrow RecordBatch"
arrow = { version = "13", features = ["prettyprint"] }
arrow_util = { path = "../arrow_util" }
chrono = { version = "0.4", default-features = false }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
iox_time = { path = "../iox_time" }
schema = { path = "../schema" }
snafu = "0.7"


@ -9,7 +9,7 @@ use arrow::{
error::ArrowError,
};
use arrow_util::{bitset::BitSet, string::PackedStringArray};
use data_types2::{StatValues, Statistics};
use data_types::{StatValues, Statistics};
use schema::{InfluxColumnType, InfluxFieldType, TIME_DATA_TYPE};
use snafu::{ResultExt, Snafu};
use std::{fmt::Formatter, mem, sync::Arc};


@ -17,7 +17,7 @@
use crate::column::{Column, ColumnData};
use arrow::record_batch::RecordBatch;
use data_types2::StatValues;
use data_types::StatValues;
use hashbrown::HashMap;
use iox_time::Time;
use schema::selection::Selection;


@ -1,12 +1,10 @@
//! Write payload abstractions derived from [`MutableBatch`]
use crate::column::ColumnData;
use crate::{MutableBatch, Result};
use data_types2::PartitionTemplate;
use crate::{column::ColumnData, MutableBatch, Result};
use data_types::PartitionTemplate;
use hashbrown::HashMap;
use schema::TIME_COLUMN_NAME;
use std::num::NonZeroUsize;
use std::ops::Range;
use std::{num::NonZeroUsize, ops::Range};
mod filter;
mod partition;


@ -2,10 +2,12 @@
//!
//! The returned ranges can then be used with [`MutableBatch::extend_from_range`]
use crate::column::{Column, ColumnData};
use crate::MutableBatch;
use crate::{
column::{Column, ColumnData},
MutableBatch,
};
use chrono::{format::StrftimeItems, TimeZone, Utc};
use data_types2::{PartitionTemplate, TemplatePart};
use data_types::{PartitionTemplate, TemplatePart};
use schema::TIME_COLUMN_NAME;
use std::ops::Range;


@ -1,13 +1,14 @@
//! A panic-safe write abstraction for [`MutableBatch`]
use crate::column::{Column, ColumnData, INVALID_DID};
use crate::MutableBatch;
use crate::{
column::{Column, ColumnData, INVALID_DID},
MutableBatch,
};
use arrow_util::bitset::{iter_set_positions, iter_set_positions_with_offset, BitSet};
use data_types2::{IsNan, StatValues, Statistics};
use data_types::{IsNan, StatValues, Statistics};
use schema::{InfluxColumnType, InfluxFieldType};
use snafu::Snafu;
use std::num::NonZeroU64;
use std::ops::Range;
use std::{num::NonZeroU64, ops::Range};
#[allow(missing_docs, missing_copy_implementations)]
#[derive(Debug, Snafu)]


@ -1,5 +1,5 @@
use arrow_util::assert_batches_eq;
use data_types2::{StatValues, Statistics};
use data_types::{StatValues, Statistics};
use mutable_batch::{writer::Writer, MutableBatch};
use schema::selection::Selection;
use std::{collections::BTreeMap, num::NonZeroU64};


@ -1,5 +1,5 @@
use arrow_util::assert_batches_eq;
use data_types2::{StatValues, Statistics};
use data_types::{StatValues, Statistics};
use mutable_batch::{writer::Writer, MutableBatch};
use schema::selection::Selection;
use std::{collections::BTreeMap, num::NonZeroU64};


@ -1,5 +1,5 @@
use arrow_util::assert_batches_eq;
use data_types2::{StatValues, Statistics};
use data_types::{StatValues, Statistics};
use mutable_batch::{writer::Writer, MutableBatch, TimestampSummary};
use schema::selection::Selection;
use std::num::NonZeroU64;


@ -1,3 +1,11 @@
//! A fuzz test of the [`mutable_batch::Writer`] interface:
//!
//! - column writes - `write_i64`, `write_tag`, etc...
//! - batch writes - `write_batch`
//! - batch writes with ranges - `write_batch_ranges`
//!
//! Verifies that the rows and statistics are as expected after a number of interleaved writes
use arrow::{
array::{
ArrayRef, BooleanArray, Float64Array, Int64Array, StringArray, TimestampNanosecondArray,
@ -6,18 +14,11 @@ use arrow::{
record_batch::RecordBatch,
};
use arrow_util::bitset::BitSet;
use data_types2::{IsNan, PartitionTemplate, StatValues, Statistics, TemplatePart};
use data_types::{IsNan, PartitionTemplate, StatValues, Statistics, TemplatePart};
use hashbrown::HashSet;
use mutable_batch::{writer::Writer, MutableBatch, PartitionWrite, WritePayload};
use rand::prelude::*;
use schema::selection::Selection;
/// A fuzz test of the [`mutable_batch::Writer`] interface:
///
/// - column writes - `write_i64`, `write_tag`, etc...
/// - batch writes - `write_batch`
/// - batch writes with ranges - `write_batch_ranges`
///
/// Verifies that the rows and statistics are as expected after a number of interleaved writes
use std::{collections::BTreeMap, num::NonZeroU64, ops::Range, sync::Arc};
fn make_rng() -> StdRng {


@ -8,25 +8,25 @@ edition = "2021"
arrow = { version = "13", features = ["prettyprint"] }
base64 = "0.13"
bytes = "1.0"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
datafusion_util = { path = "../datafusion_util" }
futures = "0.3"
generated_types = { path = "../generated_types" }
iox_time = { path = "../iox_time" }
metric = { path = "../metric" }
object_store = { path = "../object_store" }
observability_deps = { path = "../observability_deps" }
parking_lot = "0.12"
parquet = {version = "13", features = ["experimental"]}
parquet-format = "4.0"
parking_lot = "0.12"
pbjson-types = "0.3"
predicate = { path = "../predicate" }
prost = "0.10"
snafu = "0.7"
schema = { path = "../schema" }
snafu = "0.7"
tempfile = "3.1.0"
thrift = "0.13"
iox_time = { path = "../iox_time" }
tokio = { version = "1.18", features = ["macros", "parking_lot", "rt", "rt-multi-thread", "sync"] }
tokio-stream = "0.1"
uuid = { version = "0.8", features = ["v4"] }


@ -3,7 +3,7 @@ use crate::{
storage::Storage,
ParquetFilePath,
};
use data_types2::{
use data_types::{
ParquetFile, ParquetFileWithMetadata, Statistics, TableSummary, TimestampMinMax, TimestampRange,
};
use datafusion::physical_plan::SendableRecordBatchStream;


@ -12,7 +12,7 @@ pub mod chunk;
pub mod metadata;
pub mod storage;
use data_types2::{NamespaceId, PartitionId, SequencerId, TableId};
use data_types::{NamespaceId, PartitionId, SequencerId, TableId};
use object_store::{
path::{parsed::DirsAndFileName, ObjectStorePath, Path},
DynObjectStore,


@ -86,7 +86,7 @@
//! [Apache Parquet]: https://parquet.apache.org/
//! [Apache Thrift]: https://thrift.apache.org/
//! [Thrift Compact Protocol]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md
use data_types2::{
use data_types::{
ColumnSummary, InfluxDbType, NamespaceId, ParquetFileParams, PartitionId, SequenceNumber,
SequencerId, StatValues, Statistics, TableId, Timestamp,
};


@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
arrow = { version = "13", features = ["prettyprint"] }
chrono = { version = "0.4", default-features = false }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
datafusion_util = { path = "../datafusion_util" }
itertools = "0.10"


@ -1,4 +1,4 @@
use data_types2::{DeleteExpr, Op, Scalar};
use data_types::{DeleteExpr, Op, Scalar};
use snafu::{ResultExt, Snafu};
use std::ops::Deref;


@ -1,6 +1,6 @@
use crate::delete_expr::{df_to_expr, expr_to_df};
use chrono::DateTime;
use data_types2::{DeleteExpr, DeletePredicate, TimestampRange, Tombstone};
use data_types::{DeleteExpr, DeletePredicate, TimestampRange, Tombstone};
use datafusion::logical_plan::{lit, Column, Expr, Operator};
use snafu::{ResultExt, Snafu};
use sqlparser::{
@ -397,7 +397,7 @@ pub fn parse_http_delete_request(input: &str) -> Result<HttpDeleteRequest> {
#[cfg(test)]
mod tests {
use super::*;
use data_types2::{Op, Scalar};
use data_types::{Op, Scalar};
#[test]
fn test_time_range_valid() {


@ -13,7 +13,7 @@ pub mod delete_predicate;
pub mod rewrite;
pub mod rpc_predicate;
use data_types2::{TimestampRange, MAX_NANO_TIME, MIN_NANO_TIME};
use data_types::{TimestampRange, MAX_NANO_TIME, MIN_NANO_TIME};
use datafusion::{
error::DataFusionError,
logical_plan::{binary_expr, col, lit_timestamp_nano, Expr, Operator},


@ -10,7 +10,7 @@ async-trait = "0.1.53"
backoff = { path = "../backoff" }
bytes = "1.0"
client_util = { path = "../client_util" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
datafusion_util = { path = "../datafusion_util" }
futures = "0.3"


@ -1,10 +1,4 @@
//! Namespace cache.
use std::{collections::HashMap, sync::Arc, time::Duration};
use backoff::{Backoff, BackoffConfig};
use data_types2::{NamespaceId, NamespaceSchema};
use iox_catalog::interface::{get_schema_by_name, Catalog};
use iox_time::TimeProvider;
use crate::cache_system::{
backend::{
@ -14,6 +8,11 @@ use crate::cache_system::{
driver::Cache,
loader::FunctionLoader,
};
use backoff::{Backoff, BackoffConfig};
use data_types::{NamespaceId, NamespaceSchema};
use iox_catalog::interface::{get_schema_by_name, Catalog};
use iox_time::TimeProvider;
use std::{collections::HashMap, sync::Arc, time::Duration};
/// Duration to keep existing namespaces.
pub const TTL_EXISTING: Duration = Duration::from_secs(10);
@ -173,7 +172,7 @@ mod tests {
use std::collections::BTreeMap;
use crate::cache::test_util::assert_histogram_metric_count;
use data_types2::{ColumnSchema, ColumnType, TableSchema};
use data_types::{ColumnSchema, ColumnType, TableSchema};
use iox_tests::util::TestCatalog;
use super::*;


@ -1,12 +1,11 @@
//! Partition cache.
use std::{collections::HashMap, sync::Arc};
use backoff::{Backoff, BackoffConfig};
use data_types2::{PartitionId, SequencerId};
use iox_catalog::interface::Catalog;
use schema::sort::SortKey;
use crate::cache_system::{driver::Cache, loader::FunctionLoader};
use backoff::{Backoff, BackoffConfig};
use data_types::{PartitionId, SequencerId};
use iox_catalog::interface::Catalog;
use schema::sort::SortKey;
use std::{collections::HashMap, sync::Arc};
/// Cache for partition-related attributes.
#[derive(Debug)]


@ -1,20 +1,20 @@
//! Processed tombstone cache.
use std::{collections::HashMap, sync::Arc, time::Duration};
use backoff::{Backoff, BackoffConfig};
use data_types2::{ParquetFileId, TombstoneId};
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use crate::cache_system::{
backend::ttl::{TtlBackend, TtlProvider},
driver::Cache,
loader::FunctionLoader,
};
use backoff::{Backoff, BackoffConfig};
use data_types::{ParquetFileId, TombstoneId};
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use std::{collections::HashMap, sync::Arc, time::Duration};
/// Duration to keep "tombstone is NOT processed yet".
///
/// Marking tombstones as processed is a mere optimization, so we can keep this cache entry for a while.
/// Marking tombstones as processed is a mere optimization, so we can keep this cache entry for a
/// while.
pub const TTL_NOT_PROCESSED: Duration = Duration::from_secs(100);
/// Cache for processed tombstones.


@ -1,10 +1,4 @@
//! Table cache.
use std::{collections::HashMap, sync::Arc, time::Duration};
use backoff::{Backoff, BackoffConfig};
use data_types2::{NamespaceId, Table, TableId};
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use crate::cache_system::{
backend::{
@ -14,6 +8,11 @@ use crate::cache_system::{
driver::Cache,
loader::FunctionLoader,
};
use backoff::{Backoff, BackoffConfig};
use data_types::{NamespaceId, Table, TableId};
use iox_catalog::interface::Catalog;
use iox_time::TimeProvider;
use std::{collections::HashMap, sync::Arc, time::Duration};
/// Duration to keep non-existing tables.
pub const TTL_NON_EXISTING: Duration = Duration::from_secs(10);


@ -2,7 +2,7 @@
use crate::cache::CatalogCache;
use arrow::record_batch::RecordBatch;
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, DeletePredicate, ParquetFile, ParquetFileId,
ParquetFileWithMetadata, PartitionId, SequenceNumber, SequencerId, TimestampMinMax,
};


@ -1,5 +1,5 @@
use crate::chunk::{ChunkStorage, QuerierChunk};
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, DeletePredicate, PartitionId, TableSummary, TimestampMinMax,
};
use observability_deps::tracing::debug;


@ -6,7 +6,7 @@ use crate::{
};
use async_trait::async_trait;
use backoff::{Backoff, BackoffConfig};
use data_types2::Namespace;
use data_types::Namespace;
use object_store::DynObjectStore;
use parking_lot::RwLock;
use query::exec::Executor;


@ -5,7 +5,7 @@ use self::{
use crate::cache::CatalogCache;
use arrow::{datatypes::DataType, error::ArrowError, record_batch::RecordBatch};
use async_trait::async_trait;
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, ColumnSummary, InfluxDbType, PartitionId, SequenceNumber,
SequencerId, StatValues, Statistics, TableSummary, TimestampMinMax,
};
@ -458,7 +458,7 @@ impl QueryChunkMeta for IngesterPartition {
None
}
fn delete_predicates(&self) -> &[Arc<data_types2::DeletePredicate>] {
fn delete_predicates(&self) -> &[Arc<data_types::DeletePredicate>] {
&[]
}
@ -478,7 +478,7 @@ impl QueryChunk for IngesterPartition {
self.chunk_id
}
fn addr(&self) -> data_types2::ChunkAddr {
fn addr(&self) -> data_types::ChunkAddr {
ChunkAddr {
db_name: Arc::clone(&self.namespace_name),
table_name: Arc::clone(&self.table_name),


@ -1,10 +1,11 @@
//! Namespace within the whole database.
use crate::{
cache::CatalogCache, chunk::ParquetChunkAdapter, ingester::IngesterConnection,
query_log::QueryLog, table::QuerierTable,
};
use backoff::BackoffConfig;
use data_types2::{NamespaceId, NamespaceSchema};
use data_types::{NamespaceId, NamespaceSchema};
use object_store::DynObjectStore;
use query::exec::Executor;
use schema::Schema;
@ -129,7 +130,7 @@ impl QuerierNamespace {
mod tests {
use super::*;
use crate::namespace::test_util::querier_namespace;
use data_types2::ColumnType;
use data_types::ColumnType;
use iox_tests::util::TestCatalog;
use schema::{builder::SchemaBuilder, InfluxColumnType, InfluxFieldType};


@ -1,9 +1,13 @@
//! This module contains implementations of [query] interfaces for
//! [QuerierNamespace].
use std::{any::Any, collections::HashMap, sync::Arc};
//! This module contains implementations of [query] interfaces for [QuerierNamespace].
use crate::{
namespace::QuerierNamespace,
query_log::QueryLog,
system_tables::{SystemSchemaProvider, SYSTEM_SCHEMA},
table::QuerierTable,
};
use async_trait::async_trait;
use data_types2::NamespaceId;
use data_types::NamespaceId;
use datafusion::{
catalog::{catalog::CatalogProvider, schema::SchemaProvider},
datasource::TableProvider,
@ -15,15 +19,9 @@ use query::{
QueryChunk, QueryCompletedToken, QueryDatabase, QueryDatabaseError, QueryText, DEFAULT_SCHEMA,
};
use schema::Schema;
use std::{any::Any, collections::HashMap, sync::Arc};
use trace::ctx::SpanContext;
use crate::{
namespace::QuerierNamespace,
query_log::QueryLog,
system_tables::{SystemSchemaProvider, SYSTEM_SCHEMA},
table::QuerierTable,
};
impl QueryDatabaseMeta for QuerierNamespace {
fn table_names(&self) -> Vec<String> {
let mut names: Vec<_> = self.tables.keys().map(|s| s.to_string()).collect();
@ -193,7 +191,7 @@ mod tests {
use crate::namespace::test_util::querier_namespace;
use arrow::record_batch::RecordBatch;
use arrow_util::{assert_batches_eq, assert_batches_sorted_eq};
use data_types2::ColumnType;
use data_types::ColumnType;
use iox_tests::util::TestCatalog;
use query::frontend::sql::SqlQueryPlanner;


@ -1,15 +1,14 @@
//! Ring buffer of queries that have been run with some brief information
use data_types::NamespaceId;
use iox_time::{Time, TimeProvider};
use parking_lot::Mutex;
use query::QueryText;
use std::{
collections::VecDeque,
sync::{atomic, Arc},
time::Duration,
};
use data_types2::NamespaceId;
use iox_time::{Time, TimeProvider};
use parking_lot::Mutex;
use query::QueryText;
use trace::ctx::TraceId;
// The query duration used for queries still running.


@ -1,13 +1,7 @@
use std::{
any::Any,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use crate::query_log::QueryLog;
use arrow::{datatypes::SchemaRef, error::Result as ArrowResult, record_batch::RecordBatch};
use async_trait::async_trait;
use data_types2::NamespaceId;
use data_types::NamespaceId;
use datafusion::{
catalog::schema::SchemaProvider,
datasource::TableProvider,
@ -18,8 +12,12 @@ use datafusion::{
SendableRecordBatchStream, Statistics,
},
};
use crate::query_log::QueryLog;
use std::{
any::Any,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
mod queries;


@ -1,16 +1,17 @@
use crate::system_tables::BatchIterator;
use crate::{
query_log::{QueryLog, QueryLogEntry},
system_tables::IoxSystemTable,
system_tables::{BatchIterator, IoxSystemTable},
};
use arrow::array::{ArrayRef, BooleanArray, Int64Array};
use arrow::{
array::{DurationNanosecondArray, StringArray, TimestampNanosecondArray},
array::{
ArrayRef, BooleanArray, DurationNanosecondArray, Int64Array, StringArray,
TimestampNanosecondArray,
},
datatypes::{DataType, Field, Schema, SchemaRef, TimeUnit},
error::Result,
record_batch::RecordBatch,
};
use data_types2::NamespaceId;
use data_types::NamespaceId;
use observability_deps::tracing::error;
use std::{collections::VecDeque, sync::Arc};


@ -1,5 +1,4 @@
use std::{collections::HashMap, sync::Arc};
use self::query_access::QuerierTableChunkPruner;
use crate::{
chunk::ParquetChunkAdapter,
ingester::{self, IngesterPartition},
@ -8,14 +7,13 @@ use crate::{
IngesterConnection,
};
use backoff::{Backoff, BackoffConfig};
use data_types2::TableId;
use data_types::TableId;
use observability_deps::tracing::debug;
use predicate::Predicate;
use query::{provider::ChunkPruner, QueryChunk};
use schema::Schema;
use snafu::{ResultExt, Snafu};
use self::query_access::QuerierTableChunkPruner;
use std::{collections::HashMap, sync::Arc};
mod query_access;
mod state_reconciler;
@ -278,7 +276,7 @@ mod tests {
use arrow::record_batch::RecordBatch;
use assert_matches::assert_matches;
use data_types2::{ChunkId, ColumnType, SequenceNumber};
use data_types::{ChunkId, ColumnType, SequenceNumber};
use iox_tests::util::{now, TestCatalog};
use mutable_batch_lp::test_helpers::lp_to_mutable_batch;
use predicate::Predicate;


@ -1,27 +1,27 @@
//! Logic to reconcile the state that the querier got the from catalog and from the ingester.
//!
//! # Usage
//!
//! The code in this module should be used like this:
//!
//! 1. **Ingester Request:** Request data from ingester(s). This will create [`IngesterPartition`]s.
//! 2. **Catalog Query:** Query parquet files and tombstones from catalog. It is important that this happens AFTER the
//! ingester request. This will create [`ParquetFileWithMetadata`] and [`Tombstone`].
//! 3. **Pruning:** Call [`filter_parquet_files`] and [`tombstone_exclude_list`] to filter out files and tombstones that
//! are too new (i.e. were created between step 1 and 2).
//! 2. **Catalog Query:** Query parquet files and tombstones from catalog. It is important that
//! this happens AFTER the ingester request. This will create [`ParquetFileWithMetadata`] and
//! [`Tombstone`].
//! 3. **Pruning:** Call [`filter_parquet_files`] and [`tombstone_exclude_list`] to filter out
//! files and tombstones that are too new (i.e. were created between step 1 and 2).
use crate::ingester::IngesterPartition;
use data_types::{
ParquetFileWithMetadata, PartitionId, SequenceNumber, SequencerId, Tombstone, TombstoneId,
};
use snafu::Snafu;
use std::{
collections::{HashMap, HashSet},
ops::Deref,
sync::Arc,
};
use data_types2::{
ParquetFileWithMetadata, PartitionId, SequenceNumber, SequencerId, Tombstone, TombstoneId,
};
use snafu::Snafu;
use crate::ingester::IngesterPartition;
/// Information about an ingester partition.
///
/// This is mostly the same as [`IngesterPartition`] but allows easier mocking.


@ -1,7 +1,6 @@
use std::sync::Arc;
use data_types2::{DeletePredicate, SequenceNumber, SequencerId, Tombstone, TombstoneId};
use data_types::{DeletePredicate, SequenceNumber, SequencerId, Tombstone, TombstoneId};
use predicate::delete_predicate::parse_delete_predicate;
use std::sync::Arc;
/// Tombstone as it is handled by the querier.
#[derive(Debug, Clone)]


@ -19,7 +19,7 @@ arrow_util = { path = "../arrow_util" }
async-trait = "0.1"
chrono = { version = "0.4", default-features = false }
croaring = "0.6"
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
datafusion_util = { path = "../datafusion_util" }
executor = { path = "../executor"}


@ -1,4 +1,5 @@
//! Query frontend for InfluxDB Storage gRPC requests
use crate::{
exec::{field::FieldColumns, make_non_null_checker, make_schema_pivot, IOxSessionContext},
plan::{
@ -11,7 +12,7 @@ use crate::{
QueryChunk, QueryDatabase,
};
use arrow::datatypes::DataType;
use data_types2::ChunkId;
use data_types::ChunkId;
use datafusion::{
error::DataFusionError,
logical_plan::{


@ -9,18 +9,20 @@
)]
use async_trait::async_trait;
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, DeletePredicate, InfluxDbType, PartitionId, TableSummary,
TimestampMinMax,
};
use datafusion::physical_plan::SendableRecordBatchStream;
use exec::{stringset::StringSet, IOxSessionContext};
use hashbrown::HashMap;
use observability_deps::tracing::{debug, trace};
use predicate::{rpc_predicate::QueryDatabaseMeta, Predicate, PredicateMatch};
use schema::selection::Selection;
use schema::{sort::SortKey, Schema, TIME_COLUMN_NAME};
use hashbrown::HashMap;
use schema::{
selection::Selection,
sort::{SortKey, SortKeyBuilder},
Schema, TIME_COLUMN_NAME,
};
use std::{collections::BTreeSet, fmt::Debug, iter::FromIterator, sync::Arc};
pub mod exec;
@ -32,8 +34,6 @@ pub mod statistics;
pub mod util;
pub use exec::context::{DEFAULT_CATALOG, DEFAULT_SCHEMA};
use schema::sort::SortKeyBuilder;
pub use query_functions::group_by::{Aggregate, WindowDuration};
/// Trait for an object (designed to be a Chunk) which can provide


@ -4,7 +4,7 @@
//! separate rows)
use crate::{QueryChunk, QueryChunkMeta};
use data_types2::{
use data_types::{
ColumnSummary, DeletePredicate, ParquetFileWithMetadata, PartitionId, StatOverlap, Statistics,
TableSummary, TimestampMinMax,
};


@ -3,7 +3,7 @@
use super::adapter::SchemaAdapterStream;
use crate::{exec::IOxSessionContext, QueryChunk};
use arrow::datatypes::SchemaRef;
use data_types2::TableSummary;
use data_types::TableSummary;
use datafusion::{
error::DataFusionError,
execution::context::TaskContext,


@ -7,7 +7,7 @@ use arrow::{
},
datatypes::{DataType, Int32Type, TimeUnit},
};
use data_types2::{StatValues, Statistics};
use data_types::{StatValues, Statistics};
use datafusion::{
logical_plan::Column,
physical_optimizer::pruning::{PruningPredicate, PruningStatistics},


@ -1,6 +1,6 @@
//! Code to translate IOx statistics to DataFusion statistics
use data_types2::{ColumnSummary, InfluxDbType, Statistics as IOxStatistics, TableSummary};
use data_types::{ColumnSummary, InfluxDbType, Statistics as IOxStatistics, TableSummary};
use datafusion::{
physical_plan::{ColumnStatistics, Statistics as DFStatistics},
scalar::ScalarValue,
@ -106,7 +106,7 @@ fn df_from_iox_col(col: &ColumnSummary) -> ColumnStatistics {
#[cfg(test)]
mod test {
use super::*;
use data_types2::{InfluxDbType, StatValues};
use data_types::{InfluxDbType, StatValues};
use schema::{builder::SchemaBuilder, InfluxFieldType};
use std::num::NonZeroU64;


@ -3,20 +3,23 @@
//!
//! AKA it is a Mock
use crate::exec::{ExecutionContextProvider, Executor, ExecutorType, IOxSessionContext};
use crate::{
exec::stringset::{StringSet, StringSetRef},
Predicate, PredicateMatch, QueryChunk, QueryChunkMeta, QueryDatabase,
exec::{
stringset::{StringSet, StringSetRef},
ExecutionContextProvider, Executor, ExecutorType, IOxSessionContext,
},
Predicate, PredicateMatch, QueryChunk, QueryChunkError, QueryChunkMeta, QueryCompletedToken,
QueryDatabase, QueryDatabaseError, QueryText,
};
use crate::{QueryChunkError, QueryCompletedToken, QueryDatabaseError, QueryText};
use arrow::array::UInt64Array;
use arrow::{
array::{ArrayRef, DictionaryArray, Int64Array, StringArray, TimestampNanosecondArray},
array::{
ArrayRef, DictionaryArray, Int64Array, StringArray, TimestampNanosecondArray, UInt64Array,
},
datatypes::{DataType, Int32Type, TimeUnit},
record_batch::RecordBatch,
};
use async_trait::async_trait;
use data_types2::{
use data_types::{
ChunkAddr, ChunkId, ChunkOrder, ColumnSummary, DeletePredicate, InfluxDbType, PartitionId,
StatValues, Statistics, TableSummary, TimestampMinMax,
};
@ -27,12 +30,11 @@ use hashbrown::HashSet;
use observability_deps::tracing::debug;
use parking_lot::Mutex;
use predicate::rpc_predicate::QueryDatabaseMeta;
use schema::selection::Selection;
use schema::{
builder::SchemaBuilder, merge::SchemaMerger, sort::SortKey, InfluxColumnType, Schema,
builder::SchemaBuilder, merge::SchemaMerger, selection::Selection, sort::SortKey,
InfluxColumnType, Schema,
};
use std::num::NonZeroU64;
use std::{collections::BTreeMap, fmt, sync::Arc};
use std::{collections::BTreeMap, fmt, num::NonZeroU64, sync::Arc};
use trace::ctx::SpanContext;
#[derive(Debug)]


@ -9,7 +9,7 @@ description = "Tests of the query engine against different database configuratio
arrow = { version = "13", features = ["prettyprint"] }
async-trait = "0.1"
backoff = { path = "../backoff" }
data_types2 = { path = "../data_types2" }
data_types = { path = "../data_types" }
datafusion = { path = "../datafusion" }
dml = { path = "../dml" }
futures = "0.3"


@ -1,6 +1,6 @@
use crate::scenarios::*;
use arrow::datatypes::DataType;
use data_types2::{MAX_NANO_TIME, MIN_NANO_TIME};
use data_types::{MAX_NANO_TIME, MIN_NANO_TIME};
use datafusion::logical_plan::{col, lit};
use predicate::{rpc_predicate::InfluxRpcPredicate, PredicateBuilder};
use query::{


@ -1,6 +1,6 @@
//! Tests for the Influx gRPC queries
use crate::scenarios::*;
use data_types2::{MAX_NANO_TIME, MIN_NANO_TIME};
use data_types::{MAX_NANO_TIME, MIN_NANO_TIME};
use datafusion::logical_plan::{col, lit};
use predicate::{rpc_predicate::InfluxRpcPredicate, PredicateBuilder};
use query::{

Some files were not shown because too many files have changed in this diff.