fix: Consistently use 'namespace' instead of 'database' in comments and other internal text

pull/24376/head
Carol (Nichols || Goulding) 2022-11-10 16:05:24 -05:00
parent a3f2fe489c
commit bdff4e8848
33 changed files with 54 additions and 57 deletions

View File

@@ -235,7 +235,7 @@ See [docs/testing.md] for more information
Data can be written to InfluxDB IOx by sending [line protocol] format to the `/api/v2/write` endpoint or using the CLI.
-For example, assuming you are running in local mode, this command will send data in the `test_fixtures/lineproto/metrics.lp` file to the `company_sensors` database.
+For example, assuming you are running in local mode, this command will send data in the `test_fixtures/lineproto/metrics.lp` file to the `company_sensors` namespace.
```shell
./target/debug/influxdb_iox -vv write company_sensors test_fixtures/lineproto/metrics.lp --host http://localhost:8080
@@ -243,7 +243,7 @@ For example, assuming you are running in local mode, this command will send data
Note that `--host http://localhost:8080` is required as the `/api/v2/write` endpoint is hosted on port `8080` while the default is the querier gRPC port `8082`.
-To query the data stored in the `company_sensors` database:
+To query the data stored in the `company_sensors` namespace:
```shell
./target/debug/influxdb_iox query company_sensors "SELECT * FROM cpu LIMIT 10"

View File

@@ -186,7 +186,7 @@ impl From<DmlDelete> for DmlOperation {
    }
}
-/// A collection of writes to potentially multiple tables within the same database
+/// A collection of writes to potentially multiple tables within the same namespace
#[derive(Debug, Clone)]
pub struct DmlWrite {
    /// The namespace being written to

View File

@@ -62,7 +62,7 @@ Connected to IOx Server
Set output format format to pretty
Ready for commands. (Hint: try 'help;')
> use 26f7e5a4b7be365b_917b97a92e883afc;
-You are now in remote mode, querying database 26f7e5a4b7be365b_917b97a92e883afc
+You are now in remote mode, querying namespace 26f7e5a4b7be365b_917b97a92e883afc
26f7e5a4b7be365b_917b97a92e883afc> select count(*) from cpu;
+-----------------+
| COUNT(UInt8(1)) |

View File

@@ -7,7 +7,7 @@
# The full list of available configuration values can be found in
# the command line help (e.g. `env: INFLUXDB_IOX_DB_DIR=`):
#
-# ./influxdb_iox run database --help
+# ./influxdb_iox run --help
#
#
# The identifier for the server. Used for writing to object storage and as

View File

@@ -18,6 +18,6 @@ The rationale for this assumption is to enable IOx to be operated in a container
This configuration is used when running IOx on a "plain old server" operating at the edge as well as for local testing.
-In this configuration, IOx assumes the contents of the local file system are preserved at least as long as the life of the IOx Database and that external measures are taken to backup or otherwise manage this.
+In this configuration, IOx assumes the contents of the local file system are preserved at least as long as the life of the IOx instance and that external measures are taken to backup or otherwise manage this.
In other words, unsurprisingly, when using the local filesystem as object storage, the durability of the data in IOx is tied to the durability of the filesystem.

View File

@@ -2,7 +2,7 @@
## Background
-Observability context is how a component exposes metrics, traces, logs, panics, etc... in a way that places them in the context of the wider system. Most commonly this might be the database name, but might also be the table name, chunk ID, etc... Crucially this information may not be relevant to the component's primary function, e.g. if we never needed to observe `Db` it wouldn't need to know the database name at all, as only `Server` would need to know
+Observability context is how a component exposes metrics, traces, logs, panics, etc... in a way that places them in the context of the wider system. Most commonly this might be the namespace name, but might also be the table name, chunk ID, etc... Crucially this information may not be relevant to the component's primary function, e.g. if we never needed to observe `Db` it wouldn't need to know the namespace name at all, as only `Server` would need to know
Broadly speaking there are 3 approaches to how to inject this context:
@@ -12,14 +12,13 @@ Broadly speaking there are 3 approaches to how to inject this context:
Effectively the 3 trade off between "polluting" the object or the callsites.
-To give a concrete example, from a purely logic perspective the catalog does not need to know the database name, only the path
-to the object store. However, it is helpful for logs, metrics, etc... to be in terms of database name.
+To give a concrete example, from a purely logic perspective the catalog does not need to know the namespace name, only the path to the object store. However, it is helpful for logs, metrics, etc... to be in terms of namespace name.
The three approaches would therefore be (see the sketch after this list):
-1. Inject database name on construction onto the catalog object
+1. Inject namespace name on construction onto the catalog object
-2. Inject metrics, logs, etc... wrappers that carry the database name context internally
+2. Inject metrics, logs, etc... wrappers that carry the namespace name context internally
-3. Inject database name at every call site, either explicitly passing it as an argument, or implicitly by wrapping in a span, mapping the returned error, etc...
+3. Inject namespace name at every call site, either explicitly passing it as an argument, or implicitly by wrapping in a span, mapping the returned error, etc...
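To make the trade-off concrete, here is a minimal Rust sketch of the three injection styles. All names are hypothetical illustrations, not IOx APIs:

```rust
#![allow(dead_code)]

// 1. Context injected on construction: the catalog itself carries the
//    namespace name, even though its core logic never uses it.
struct CatalogWithContext {
    namespace: String,
    object_store_path: String,
}

// 2. Context injected via wrappers: a metrics/log handle carries the
//    namespace name internally; the catalog itself stays context-free.
struct NamespaceMetrics {
    namespace: String,
}
struct CatalogWithWrappers {
    metrics: NamespaceMetrics,
    object_store_path: String,
}

// 3. Context injected at every call site: the catalog knows nothing;
//    callers pass the namespace name (or wrap the call in a span) each time.
struct PlainCatalog {
    object_store_path: String,
}

impl PlainCatalog {
    fn lookup(&self, namespace_for_logs: &str, table: &str) {
        println!("namespace={namespace_for_logs}: looking up {table}");
    }
}

fn main() {
    let catalog = PlainCatalog { object_store_path: "/data".into() };
    catalog.lookup("company_sensors", "cpu");
}
```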
## Outcome

View File

@@ -131,7 +131,7 @@ In this section, IOx specific SQL tables, commands, and extensions are documented
## System Tables
-In addition to the SQL standard `information_schema`, IOx contains several *system tables* that provide access to IOx specific information. The information in each system table is scoped to that particular database. Cross database queries are not possible due to the design of IOx's security model.
+In addition to the SQL standard `information_schema`, IOx contains several *system tables* that provide access to IOx specific information. The information in each system table is scoped to that particular namespace. Cross namespace queries are not possible due to the design of IOx's security model.
### `system.queries`
`system.queries` contains information about queries run against this IOx instance

View File

@@ -88,14 +88,14 @@ set.
### Configuration differences when running the tests
-When running `influxdb_iox run database`, you can pick one object store to use. When running the tests,
-you can run them against all the possible object stores. There's still only one
-`INFLUXDB_IOX_BUCKET` variable, though, so that will set the bucket name for all configured object
-stores. Use the same bucket name when setting up the different services.
+When running `influxdb_iox run`, you can pick one object store to use. When running the tests, you
+can run them against all the possible object stores. There's still only one `INFLUXDB_IOX_BUCKET`
+variable, though, so that will set the bucket name for all configured object stores. Use the same
+bucket name when setting up the different services.
Other than possibly configuring multiple object stores, configuring the tests to use the object
store services is the same as configuring the server to use an object store service. See the output
-of `influxdb_iox run database --help` for instructions.
+of `influxdb_iox run --help` for instructions.
## InfluxDB 2 Client

View File

@@ -4,7 +4,7 @@ option go_package = "github.com/influxdata/iox/querier/v1";
// Request body for ticket in "end-user to querier" flight requests.
message ReadInfo {
-  // Namespace(/database) name.
+  // Namespace name.
  string namespace_name = 1;
  // SQL query.

View File

@@ -11,14 +11,15 @@ pub enum Error {
    ClientError(#[from] influxdb_iox_client::error::Error),
}
-/// Write data into the specified database
+/// Update the specified namespace's data retention period
#[derive(Debug, clap::Parser)]
pub struct Config {
    /// The namespace to update the retention period for
    #[clap(action)]
    namespace: String,
-    /// Num of hours of the retention period of this namespace. Default is 0 representing infinite retention
+    /// Num of hours of the retention period of this namespace. Default is 0 representing infinite
+    /// retention
    #[clap(action, long, short = 'c', default_value = "0")]
    retention_hours: u32,
}
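The diff does not show how this `Config` is wired into the CLI, but the `namespace` help text later in this commit lists a `retention` action, so the invocation is presumably along the lines of `influxdb_iox namespace retention <namespace> --retention-hours <n>` (with `--retention-hours` derived by clap from the field name). Treat the exact command as an assumption.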

View File

@@ -47,7 +47,7 @@ pub enum Error {
pub type Result<T, E = Error> = std::result::Result<T, E>;
-/// Write data into the specified database
+/// Write data into the specified namespace
#[derive(Debug, clap::Parser)]
pub struct Config {
    /// If specified, restricts the maximum amount of line protocol

View File

@@ -115,9 +115,9 @@ Commands are generally structured in the form:
    <type of object> <action> <arguments>
For example, a command such as the following shows all actions
-available for database chunks, including get and list.
+available for namespaces, including `list` and `retention`.
-    influxdb_iox database chunk --help
+    influxdb_iox namespace --help
"#
)]
struct Config {
@@ -184,13 +184,13 @@ enum Command {
    /// Various commands for compactor manipulation
    Compactor(Box<commands::compactor::Config>),
-    /// Interrogate internal database data
+    /// Interrogate internal data
    Debug(commands::debug::Config),
    /// Initiate a read request to the gRPC storage service.
    Storage(commands::storage::Config),
-    /// Write data into the specified database
+    /// Write data into the specified namespace
    Write(commands::write::Config),
    /// Query the data with SQL

View File

@@ -110,7 +110,7 @@ where
        }
    }
-    /// Query the given database with the given SQL query, and return a [`PerformQuery`] instance
+    /// Query the given namespace with the given SQL query, and return a [`PerformQuery`] instance
    /// that streams low-level message results.
    pub async fn perform_query(&mut self, request: T) -> Result<PerformQuery<T::Response>, Error> {
        PerformQuery::<T::Response>::new(self, request).await

View File

@@ -21,8 +21,7 @@ pub use low_level::{Client as LowLevelClient, PerformQuery as LowLevelPerformQuery
use self::low_level::LowLevelMessage;
-/// Error responses when querying an IOx database using the Arrow Flight gRPC
-/// API.
+/// Error responses when querying an IOx namespace using the Arrow Flight gRPC API.
#[derive(Debug, Error)]
pub enum Error {
    /// There were no FlightData messages returned when we expected to get one
@@ -124,7 +123,7 @@ impl Client {
        }
    }
-    /// Query the given database with the given SQL query, and return a
+    /// Query the given namespace with the given SQL query, and return a
    /// [`PerformQuery`] instance that streams Arrow `RecordBatch` results.
    pub async fn perform_query(&mut self, request: ReadInfo) -> Result<PerformQuery, Error> {
        PerformQuery::new(self, request).await
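As a usage illustration for this client, a hedged sketch follows. The connection setup, the `sql_query` field name (cut off in the proto hunk above), and the `next()` streaming call are assumptions based on the surrounding docs, not verified API:

```rust
// Sketch only: query the `company_sensors` namespace over Arrow Flight and
// stream the resulting `RecordBatch`es. Names flagged below are assumptions.
use influxdb_iox_client::{
    connection::Builder,
    flight::{generated_types::ReadInfo, Client},
};

async fn query_cpu() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed connection setup against the querier gRPC port mentioned earlier.
    let connection = Builder::default().build("http://localhost:8082").await?;
    let mut client = Client::new(connection);

    let mut query = client
        .perform_query(ReadInfo {
            namespace_name: "company_sensors".into(), // field 1 in the proto
            sql_query: "SELECT * FROM cpu LIMIT 10".into(), // assumed field name
        })
        .await?;

    // Assumed streaming interface: pull record batches until exhausted.
    while let Some(batch) = query.next().await? {
        println!("got a batch with {} rows", batch.num_rows());
    }
    Ok(())
}
```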

View File

@@ -101,7 +101,7 @@ impl Client {
    /// Write the [LineProtocol] formatted string in `lp_data` to
    /// namespace `namespace`.
    ///
-    /// Returns the number of bytes which were written to the database
+    /// Returns the number of bytes which were written to the namespace.
    ///
    /// [LineProtocol]: https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/#data-types-and-format
    pub async fn write_lp(
@@ -119,7 +119,7 @@ impl Client {
    /// individual lines (points) do not cross these strings
    ///
    /// Returns the number of bytes, in total, which were written to
-    /// the database
+    /// the namespace.
    ///
    /// [LineProtocol]: https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/#data-types-and-format
    pub async fn write_lp_stream(
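A similarly hedged sketch of the non-streaming path, `write_lp`, whose return value the doc comment above describes. The constructor and exact argument shapes are assumptions:

```rust
// Sketch only: write line protocol to the `company_sensors` namespace and
// report the byte count documented above. Exact signatures are assumptions.
use influxdb_iox_client::{connection::Builder, write::Client};

async fn write_metrics() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed connection setup against the write endpoint on port 8080.
    let connection = Builder::default().build("http://localhost:8080").await?;
    let mut client = Client::new(connection);

    let lp_data = "cpu,host=a usage=0.5 1667854800000000000";
    // Per the doc comment, the return value is the number of bytes written.
    let bytes_written = client.write_lp("company_sensors", lp_data).await?;
    println!("wrote {bytes_written} bytes");
    Ok(())
}
```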

View File

@@ -36,10 +36,9 @@ use ::generated_types::google::protobuf::*;
use observability_deps::tracing::{debug, trace};
use std::num::NonZeroU64;
-/// InfluxDB IOx deals with database names. The gRPC interface deals
-/// with org_id and bucket_id represented as 16 digit hex
-/// values. This struct manages creating the org_id, bucket_id,
-/// and database names to be consistent with the implementation
+/// InfluxDB IOx deals with namespace names. The gRPC interface deals with org_id and bucket_id
+/// represented as 16 digit hex values. This struct manages creating the org_id, bucket_id, and
+/// namespace names to be consistent with the implementation.
#[derive(Debug, Clone)]
pub struct OrgAndBucket {
    org_id: NonZeroU64,
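The CLI session earlier in this diff (`use 26f7e5a4b7be365b_917b97a92e883afc;`) suggests the scheme this struct implements: the namespace name is the org and bucket IDs rendered as zero-padded 16-digit hex, joined by an underscore. A sketch of that inference, not the actual IOx code:

```rust
// Illustration of the apparent org/bucket -> namespace naming scheme.
fn namespace_name(org_id: u64, bucket_id: u64) -> String {
    // Zero-padded 16-digit lowercase hex, underscore-separated.
    format!("{org_id:016x}_{bucket_id:016x}")
}

fn main() {
    assert_eq!(
        namespace_name(0x26f7e5a4b7be365b, 0x917b97a92e883afc),
        "26f7e5a4b7be365b_917b97a92e883afc"
    );
}
```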

View File

@@ -416,7 +416,7 @@ pub trait ShardRepo: Send + Sync {
}
/// Functions for working with IOx partitions in the catalog. Note that these are how IOx splits up
-/// data within a database, which is different than Kafka partitions.
+/// data within a namespace, which is different than Kafka partitions.
#[async_trait]
pub trait PartitionRepo: Send + Sync {
    /// create or get a partition record for the given partition key, shard and table

View File

@@ -15,7 +15,7 @@ use datafusion::physical_plan::{
};
use futures::Stream;
-/// Database schema creation / validation errors.
+/// Schema creation / validation errors.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Snafu)]
pub enum Error {

View File

@@ -100,9 +100,8 @@ pub async fn http_listener(addr: SocketAddr) -> Result<AddrIncoming> {
    Ok(listener)
}
-/// Instantiates the gRPC and optional HTTP listeners and returns a
-/// Future that completes when these listeners, the Server, Databases,
-/// etc... have all exited or the frontend_shutdown token is called.
+/// Instantiates the gRPC and optional HTTP listeners and returns a `Future` that completes when
+/// the listeners have all exited or the `frontend_shutdown` token is called.
pub async fn serve(
    common_state: CommonServerState,
    frontend_shutdown: CancellationToken,

View File

@@ -11,7 +11,7 @@ pub enum CommonServerStateError {
    Tracing { source: trace_exporters::Error },
}
-/// Common state used by all server types (e.g. `Database` and `Router`)
+/// Common state used by all server types
#[derive(Debug, Clone)]
pub struct CommonServerState {
    run_config: RunConfig,

View File

@@ -387,7 +387,7 @@ where
    .await?
    .for_each(|(ns, schema)| {
        let name = NamespaceName::try_from(ns.name)
-            .expect("cannot convert existing namespace name to database name");
+            .expect("cannot convert existing namespace string to a `NamespaceName` instance");
        cache.put_schema(name, schema);
    });

View File

@@ -20,7 +20,7 @@ pub struct ParquetChunk {
    /// Schema that goes with this table's parquet file
    schema: Arc<Schema>,
-    /// Persists the parquet file within a database's relative path
+    /// Persists the parquet file within a namespace's relative path
    store: ParquetStorage,
}

View File

@@ -24,7 +24,7 @@ use data_types::{NamespaceId, ParquetFile, PartitionId, ShardId, TableId};
use object_store::path::Path;
use uuid::Uuid;
-/// Location of a Parquet file within a database's object store.
+/// Location of a Parquet file within a namespace's object store.
/// The exact format is an implementation detail and is subject to change.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct ParquetFilePath {

View File

@@ -65,7 +65,7 @@ pub struct QuerierDatabase {
    ///
    /// This should be a 1-to-1 relation to the number of active queries.
    ///
-    /// If the same database is requested twice for different queries, it is counted twice.
+    /// If the same namespace is requested twice for different queries, it is counted twice.
    query_execution_semaphore: Arc<InstrumentedAsyncSemaphore>,
    /// Sharder to determine which ingesters to query for a particular table and namespace.

View File

@@ -1,4 +1,4 @@
-//! Namespace within the whole database.
+//! Namespace within the whole catalog.
use crate::{
    cache::{namespace::CachedNamespace, CatalogCache},

View File

@@ -35,7 +35,7 @@ mod tests {
    #[test]
    fn test_put_get() {
-        let ns = NamespaceName::new("test").expect("database name is valid");
+        let ns = NamespaceName::new("test").expect("namespace name is valid");
        let cache = Arc::new(MemoryNamespaceCache::default());
        assert!(cache.get_schema(&ns).is_none());

View File

@@ -224,7 +224,7 @@ mod tests {
    #[test]
    fn test_put() {
-        let ns = NamespaceName::new("test").expect("database name is valid");
+        let ns = NamespaceName::new("test").expect("namespace name is valid");
        let registry = metric::Registry::default();
        let cache = Arc::new(MemoryNamespaceCache::default());
        let cache = Arc::new(InstrumentedCache::new(cache, &registry));
@@ -356,7 +356,7 @@ mod tests {
        assert_eq!(cache.column_count.observe(), Observation::U64Gauge(11));
        // Add a new namespace
-        let ns = NamespaceName::new("another").expect("database name is valid");
+        let ns = NamespaceName::new("another").expect("namespace name is valid");
        let schema = new_schema(&[10, 12, 9]);
        assert!(cache.put_schema(ns.clone(), schema).is_none());
        assert_histogram_hit(

View File

@@ -55,7 +55,7 @@ mod tests {
        .map(char::from)
        .collect::<String>()
        .try_into()
-        .expect("generated invalid random database name")
+        .expect("generated invalid random namespace name")
}
fn schema_with_id(id: i64) -> NamespaceSchema {

View File

@@ -145,7 +145,7 @@ impl From<&DmlError> for StatusCode {
}
/// Errors returned when decoding the organisation / bucket information from a
-/// HTTP request and deriving the database name from it.
+/// HTTP request and deriving the namespace name from it.
#[derive(Debug, Error)]
pub enum OrgBucketError {
    /// The request contains no org/bucket destination information.
@@ -156,7 +156,7 @@ pub enum OrgBucketError {
    #[error("failed to deserialize org/bucket/precision in request: {0}")]
    DecodeFail(#[from] serde::de::value::Error),
-    /// The provided org/bucket could not be converted into a database name.
+    /// The provided org/bucket could not be converted into a namespace name.
    #[error(transparent)]
    MappingFail(#[from] OrgBucketMappingError),
}

View File

@@ -5,7 +5,7 @@ use snafu::{ResultExt, Snafu};
use super::{InfluxColumnType, InfluxFieldType, Schema, TIME_COLUMN_NAME};
-/// Database schema creation / validation errors.
+/// Namespace schema creation / validation errors.
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Error validating schema: {}", source))]

View File

@@ -46,7 +46,7 @@ pub mod sort;
pub use builder::SchemaBuilder;
pub use projection::Projection;
-/// Database schema creation / validation errors.
+/// Namespace schema creation / validation errors.
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display(

View File

@@ -9,7 +9,7 @@ use crate::interner::SchemaInterner;
use super::{InfluxColumnType, Schema};
-/// Database schema creation / validation errors.
+/// Namespace schema creation / validation errors.
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("No schemas found when building merged schema"))]

View File

@@ -26,7 +26,7 @@ pub use server_type::{AddAddrEnv, ServerType};
pub use steps::{FCustom, Step, StepTest, StepTestState};
pub use udp_listener::UdpCapture;
-/// Return a random string suitable for use as a database name
+/// Return a random string suitable for use as a namespace name
pub fn rand_name() -> String {
    thread_rng()
        .sample_iter(&Alphanumeric)