refactor: rename Database --> TSDatabase to better reflect its purpose (#510)

* refactor: rename Database --> TSDatabase to better reflect its purpose

* refactor: rename field_columns to field_column_names

* fix: clippy?
pull/24376/head
Andrew Lamb 2020-12-01 12:37:11 -05:00 committed by GitHub
parent a2e5af1508
commit 5ef499bb63
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 28 additions and 28 deletions

View File

@ -23,10 +23,10 @@ pub mod window;
use self::group_by::GroupByAndAggregate;
use self::predicate::{Predicate, TimestampRange};
/// A `Database` describes something that stores InfluxDB Timeseries
/// data from Line Protocol (`ParsedLine` structures) and provides an
/// interface to query that data. The query methods on this trait such
/// as `tag_columns are specific to this data model.
/// A `TSDatabase` describes something that stores Timeseries data
/// using the InfluxDB Line Protocol data model (`ParsedLine`
/// structures) and provides an interface to query that data. The
/// query methods on this trait such as `tag_columns` are specific to
/// this data model.
///
/// The IOx storage engine implements this trait to provide Timeseries
/// specific queries, but also provides more generic access to the same
@ -44,7 +44,7 @@ use self::predicate::{Predicate, TimestampRange};
/// categories with the same data type, columns of different
/// categories are treated differently in the different query types.
#[async_trait]
pub trait Database: Debug + Send + Sync {
pub trait TSDatabase: Debug + Send + Sync {
type Error: std::error::Error + Send + Sync + 'static;
/// writes parsed lines into this database
@ -71,7 +71,7 @@ pub trait Database: Debug + Send + Sync {
/// database which store fields (as defined in the data written
/// via `write_lines`), and which have at least one row which
/// matches the conditions listed on `predicate`.
async fn field_columns(&self, predicate: Predicate) -> Result<FieldListPlan, Self::Error>;
async fn field_column_names(&self, predicate: Predicate) -> Result<FieldListPlan, Self::Error>;
/// Returns a plan which finds the distinct values in the
/// `column_name` column of this database which pass the
@ -118,7 +118,7 @@ pub trait Database: Debug + Send + Sync {
/// Storage for `Databases` which can be retrieved by name
pub trait DatabaseStore: Debug + Send + Sync {
/// The type of database that is stored by this DatabaseStore
type Database: Database;
type Database: TSDatabase;
/// The type of error this `DatabaseStore` generates
type Error: std::error::Error + Send + Sync + 'static;

View File

@ -10,7 +10,7 @@ use crate::{
stringset::{StringSet, StringSetRef},
SeriesSetPlans, StringSetPlan,
},
Database, DatabaseStore, Predicate, TimestampRange,
DatabaseStore, Predicate, TSDatabase, TimestampRange,
};
use data_types::data::ReplicatedWrite;
@ -245,7 +245,7 @@ fn predicate_to_test_string(predicate: &Predicate) -> String {
}
#[async_trait]
impl Database for TestDatabase {
impl TSDatabase for TestDatabase {
type Error = TestError;
/// Writes parsed lines into this database
@ -310,7 +310,7 @@ impl Database for TestDatabase {
Ok(column_names.into())
}
async fn field_columns(&self, predicate: Predicate) -> Result<FieldListPlan, Self::Error> {
async fn field_column_names(&self, predicate: Predicate) -> Result<FieldListPlan, Self::Error> {
// save the request
let predicate = predicate_to_test_string(&predicate);

View File

@ -16,7 +16,7 @@ use data_types::{
};
use influxdb_line_protocol::ParsedLine;
use object_store::ObjectStore;
use query::Database;
use query::TSDatabase;
use write_buffer::Db as WriteBufferDb;
use async_trait::async_trait;

View File

@ -17,7 +17,7 @@ use tracing::{debug, error, info};
use arrow_deps::arrow;
use influxdb_line_protocol::parse_lines;
use query::{org_and_bucket_to_database, Database, DatabaseStore};
use query::{org_and_bucket_to_database, DatabaseStore, TSDatabase};
use bytes::{Bytes, BytesMut};
use futures::{self, StreamExt};

View File

@ -34,7 +34,7 @@ use query::{
},
org_and_bucket_to_database,
predicate::PredicateBuilder,
Database, DatabaseStore,
DatabaseStore, TSDatabase,
};
use snafu::{OptionExt, ResultExt, Snafu};
@ -1104,13 +1104,13 @@ where
.await
.context(DatabaseNotFound { db_name: &db_name })?;
let fieldlist_plan = db
.field_columns(predicate)
.await
.map_err(|e| Error::ListingFields {
db_name: db_name.clone(),
source: Box::new(e),
})?;
let fieldlist_plan =
db.field_column_names(predicate)
.await
.map_err(|e| Error::ListingFields {
db_name: db_name.clone(),
source: Box::new(e),
})?;
let fieldlist =
executor

View File

@ -1,6 +1,6 @@
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use influxdb_line_protocol as line_parser;
use query::Database;
use query::TSDatabase;
use wal::{Entry, WalBuilder};
use write_buffer::{restore_partitions_from_wal, Db};

View File

@ -6,7 +6,7 @@ use query::group_by::WindowDuration;
use query::{
exec::{stringset::StringSet, FieldListPlan, SeriesSetPlan, SeriesSetPlans, StringSetPlan},
predicate::Predicate,
Database,
TSDatabase,
};
use wal::{
writer::{start_wal_sync_task, Error as WalWriterError, WalDetails},
@ -342,7 +342,7 @@ impl Db {
}
#[async_trait]
impl Database for Db {
impl TSDatabase for Db {
type Error = Error;
// TODO: writes lines creates a column named "time" for the timestamp data. If
@ -437,7 +437,7 @@ impl Database for Db {
}
/// return all field names in this database, while applying optional predicates
async fn field_columns(&self, predicate: Predicate) -> Result<FieldListPlan, Self::Error> {
async fn field_column_names(&self, predicate: Predicate) -> Result<FieldListPlan, Self::Error> {
let mut filter = PartitionTableFilter::new(predicate);
let mut visitor = TableFieldPredVisitor::new();
self.visit_tables(&mut filter, &mut visitor).await?;
@ -1152,7 +1152,7 @@ mod tests {
Executor,
},
predicate::PredicateBuilder,
Database,
TSDatabase,
};
use arrow::{
@ -2109,7 +2109,7 @@ disk bytes=23432323i 1600136510000000000",
// make sure table filtering works (no tables match)
let plan = db
.field_columns(predicate)
.field_column_names(predicate)
.await
.expect("Created field_columns plan successfully");
@ -2126,7 +2126,7 @@ disk bytes=23432323i 1600136510000000000",
.build();
let plan = db
.field_columns(predicate)
.field_column_names(predicate)
.await
.expect("Created field_columns plan successfully");
@ -2191,7 +2191,7 @@ disk bytes=23432323i 1600136510000000000",
.build();
let plan = db
.field_columns(predicate)
.field_column_names(predicate)
.await
.expect("Created field_columns plan successfully");