chore(deps): Bump clap from 3.1.18 to 3.2.1 (#4848)
* chore(deps): Bump clap from 3.1.18 to 3.2.1

  Bumps [clap](https://github.com/clap-rs/clap) from 3.1.18 to 3.2.1.
  - [Release notes](https://github.com/clap-rs/clap/releases)
  - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/clap-rs/clap/compare/v3.1.18...clap_complete-v3.2.1)

  ---
  updated-dependencies:
  - dependency-name: clap
    dependency-type: direct:production
    update-type: version-update:semver-minor
  ...

  Signed-off-by: dependabot[bot] <support@github.com>

* chore: fix clap deprecations

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Marco Neumann <marco@crepererum.net>
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
parent f7fbc67b00
commit 23c9e38ea7
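The second commit in this squash ("chore: fix clap deprecations") is mechanical: clap 3.2 deprecates the derive macro's implicit value handling and the `parse(...)` attribute, so the diff below adds `action` to `#[clap(...)]` attributes and swaps `parse(try_from_str = ...)` for `value_parser = ...`. A minimal sketch of the two recurring patterns, assuming clap 3.2 with the `derive` and `env` features plus the humantime crate; `ExampleConfig` and its flags are illustrative, not taken from this repository:

```rust
use std::time::Duration;

/// Illustrative config struct, not part of IOx.
#[derive(Debug, clap::Parser)]
struct ExampleConfig {
    /// A bare `action` opts the field into clap 3.2's `ArgAction`-based
    /// behaviour and silences the deprecation warning for implicit parsing.
    #[clap(long = "--example-limit", env = "EXAMPLE_LIMIT", default_value = "10", action)]
    limit: usize,

    /// `parse(try_from_str = ...)` becomes `value_parser = ...`.
    #[clap(long = "--example-timeout", default_value = "30s", value_parser = humantime::parse_duration)]
    timeout: Duration,
}

fn main() {
    let config: ExampleConfig = clap::Parser::parse();
    println!("limit={} timeout={:?}", config.limit, config.timeout);
}
```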
@@ -688,9 +688,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "3.1.18"
+version = "3.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b"
+checksum = "a836566fa5f52f7ddf909a8a2f9029b9f78ca584cd95cf7e87f8073110f4c5c9"
 dependencies = [
  "atty",
  "bitflags",
@@ -707,7 +707,7 @@ dependencies = [
 name = "clap_blocks"
 version = "0.1.0"
 dependencies = [
-"clap 3.1.18",
+"clap 3.2.1",
  "futures",
  "humantime",
  "iox_catalog",
@@ -727,9 +727,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "3.1.18"
+version = "3.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c"
+checksum = "986fd75d1dfd2c34eb8c9275ae38ad87ea9478c9b79e87f1801f7d866dfb1e37"
 dependencies = [
  "heck 0.4.0",
  "proc-macro-error",
@@ -740,9 +740,9 @@ dependencies = [
 
 [[package]]
 name = "clap_lex"
-version = "0.2.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213"
+checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613"
 dependencies = [
  "os_str_bytes",
 ]
@@ -2137,7 +2137,7 @@ dependencies = [
  "assert_cmd",
  "backtrace",
  "bytes",
-"clap 3.1.18",
+"clap 3.2.1",
  "clap_blocks",
  "console-subscriber",
  "data_types",
@@ -2374,7 +2374,7 @@ version = "0.1.0"
 dependencies = [
  "chrono",
  "chrono-english",
-"clap 3.1.18",
+"clap 3.2.1",
  "criterion",
  "futures",
  "handlebars",
@@ -2461,7 +2461,7 @@ dependencies = [
  "async-trait",
  "bytes",
  "chrono",
-"clap 3.1.18",
+"clap 3.2.1",
  "clap_blocks",
  "data_types",
  "dml",
@@ -2596,7 +2596,7 @@ name = "ioxd_test"
 version = "0.1.0"
 dependencies = [
  "async-trait",
-"clap 3.1.18",
+"clap 3.2.1",
  "hyper",
  "ioxd_common",
  "metric",
@@ -5752,7 +5752,7 @@ version = "0.1.0"
 dependencies = [
  "async-trait",
  "chrono",
-"clap 3.1.18",
+"clap 3.2.1",
  "futures",
  "observability_deps",
  "snafu",
@@ -5889,7 +5889,7 @@ dependencies = [
 name = "trogging"
 version = "0.1.0"
 dependencies = [
-"clap 3.1.18",
+"clap 3.2.1",
  "logfmt",
  "observability_deps",
  "regex",

@@ -50,12 +50,13 @@ pub struct CatalogDsnConfig
 arg_enum,
 long = "--catalog",
 env = "INFLUXDB_IOX_CATALOG_TYPE",
-default_value = "postgres"
+default_value = "postgres",
+action
 )]
 pub(crate) catalog_type_: CatalogType,
 
 /// Postgres connection string. Required if catalog is set to postgres.
-#[clap(long = "--catalog-dsn", env = "INFLUXDB_IOX_CATALOG_DSN")]
+#[clap(long = "--catalog-dsn", env = "INFLUXDB_IOX_CATALOG_DSN", action)]
 pub dsn: Option<String>,
 
 /// Maximum number of connections allowed to the catalog at any one time.
@@ -63,6 +64,7 @@ pub struct CatalogDsnConfig
 long = "--catalog-max-connections",
 env = "INFLUXDB_IOX_CATALOG_MAX_CONNECTIONS",
 default_value = default_max_connections(),
+action,
 )]
 pub max_catalog_connections: u32,
 
@@ -71,6 +73,7 @@ pub struct CatalogDsnConfig
 long = "--catalog-postgres-schema-name",
 env = "INFLUXDB_IOX_CATALOG_POSTGRES_SCHEMA_NAME",
 default_value = PostgresConnectionOptions::DEFAULT_SCHEMA_NAME,
+action,
 )]
 pub postgres_schema_name: String,
 
@@ -79,7 +82,7 @@ pub struct CatalogDsnConfig
 long = "--catalog-connect-timeout",
 env = "INFLUXDB_IOX_CATALOG_CONNECT_TIMEOUT",
 default_value = default_connect_timeout(),
-parse(try_from_str = humantime::parse_duration),
+value_parser = humantime::parse_duration,
 )]
 pub connect_timeout: Duration,
 
@@ -88,7 +91,7 @@ pub struct CatalogDsnConfig
 long = "--catalog-idle-timeout",
 env = "INFLUXDB_IOX_CATALOG_IDLE_TIMEOUT",
 default_value = default_idle_timeout(),
-parse(try_from_str = humantime::parse_duration),
+value_parser = humantime::parse_duration,
 )]
 pub idle_timeout: Duration,
 
@@ -100,7 +103,7 @@ pub struct CatalogDsnConfig
 long = "--catalog-hotswap-poll-interval",
 env = "INFLUXDB_IOX_CATALOG_HOTSWAP_POLL_INTERVAL",
 default_value = default_hotswap_poll_interval_timeout(),
-parse(try_from_str = humantime::parse_duration),
+value_parser = humantime::parse_duration,
 )]
 pub hotswap_poll_interval: Duration,
 }

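The timeout fields above keep `humantime::parse_duration`; only the attribute spelling changes. A hedged sketch of how such a flag parses at the command line, assuming clap 3.2 and humantime (the struct, the `2s` default, and the argv are illustrative, not the real `CatalogDsnConfig`):

```rust
use std::time::Duration;

#[derive(Debug, clap::Parser)]
struct DsnSketch {
    #[clap(
        long = "--catalog-connect-timeout",
        default_value = "2s",
        value_parser = humantime::parse_duration,
    )]
    connect_timeout: Duration,
}

fn main() {
    // Parse from an explicit argv rather than the process arguments.
    let cfg: DsnSketch =
        clap::Parser::try_parse_from(["prog", "--catalog-connect-timeout", "500ms"]).unwrap();
    assert_eq!(cfg.connect_timeout, Duration::from_millis(500));
}
```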
@ -6,21 +6,24 @@ pub struct CompactorConfig {
|
|||
#[clap(
|
||||
long = "--write-buffer-topic",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_TOPIC",
|
||||
default_value = "iox-shared"
|
||||
default_value = "iox-shared",
|
||||
action
|
||||
)]
|
||||
pub topic: String,
|
||||
|
||||
/// Write buffer partition number to start (inclusive) range with
|
||||
#[clap(
|
||||
long = "--write-buffer-partition-range-start",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_START"
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_START",
|
||||
action
|
||||
)]
|
||||
pub write_buffer_partition_range_start: i32,
|
||||
|
||||
/// Write buffer partition number to end (inclusive) range with
|
||||
#[clap(
|
||||
long = "--write-buffer-partition-range-end",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_END"
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_END",
|
||||
action
|
||||
)]
|
||||
pub write_buffer_partition_range_end: i32,
|
||||
|
||||
|
@ -29,7 +32,8 @@ pub struct CompactorConfig {
|
|||
#[clap(
|
||||
long = "--compaction-split-percentage",
|
||||
env = "INFLUXDB_IOX_COMPACTION_SPLIT_PERCENTAGE",
|
||||
default_value = "100"
|
||||
default_value = "100",
|
||||
action
|
||||
)]
|
||||
pub split_percentage: i64,
|
||||
|
||||
|
@ -40,7 +44,8 @@ pub struct CompactorConfig {
|
|||
#[clap(
|
||||
long = "--compaction-concurrent-size-bytes",
|
||||
env = "INFLUXDB_IOX_COMPACTION_CONCURRENT_SIZE_BYTES",
|
||||
default_value = "1000000000"
|
||||
default_value = "1000000000",
|
||||
action
|
||||
)]
|
||||
pub max_concurrent_compaction_size_bytes: i64,
|
||||
|
||||
|
@ -50,7 +55,8 @@ pub struct CompactorConfig {
|
|||
#[clap(
|
||||
long = "--compaction-max-size-bytes",
|
||||
env = "INFLUXDB_IOX_COMPACTION_MAX_SIZE_BYTES",
|
||||
default_value = "100000000"
|
||||
default_value = "100000000",
|
||||
action
|
||||
)]
|
||||
pub compaction_max_size_bytes: i64,
|
||||
|
||||
|
@ -59,7 +65,8 @@ pub struct CompactorConfig {
|
|||
#[clap(
|
||||
long = "--compaction-max-file-count",
|
||||
env = "INFLUXDB_IOX_COMPACTION_MAX_FILE_COUNT",
|
||||
default_value = "100"
|
||||
default_value = "100",
|
||||
action
|
||||
)]
|
||||
pub compaction_max_file_count: i64,
|
||||
}
|
||||
|
|
|
@ -4,14 +4,16 @@ pub struct IngesterConfig {
|
|||
/// Write buffer partition number to start (inclusive) range with
|
||||
#[clap(
|
||||
long = "--write-buffer-partition-range-start",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_START"
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_START",
|
||||
action
|
||||
)]
|
||||
pub write_buffer_partition_range_start: i32,
|
||||
|
||||
/// Write buffer partition number to end (inclusive) range with
|
||||
#[clap(
|
||||
long = "--write-buffer-partition-range-end",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_END"
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_PARTITION_RANGE_END",
|
||||
action
|
||||
)]
|
||||
pub write_buffer_partition_range_end: i32,
|
||||
|
||||
|
@ -20,7 +22,8 @@ pub struct IngesterConfig {
|
|||
/// write buffer will pause until the ingester buffer goes below this threshold.
|
||||
#[clap(
|
||||
long = "--pause-ingest-size-bytes",
|
||||
env = "INFLUXDB_IOX_PAUSE_INGEST_SIZE_BYTES"
|
||||
env = "INFLUXDB_IOX_PAUSE_INGEST_SIZE_BYTES",
|
||||
action
|
||||
)]
|
||||
pub pause_ingest_size_bytes: usize,
|
||||
|
||||
|
@ -29,7 +32,8 @@ pub struct IngesterConfig {
|
|||
/// ingester running in a steady state is expected to take up this much memory.
|
||||
#[clap(
|
||||
long = "--persist-memory-threshold-bytes",
|
||||
env = "INFLUXDB_IOX_PERSIST_MEMORY_THRESHOLD_BYTES"
|
||||
env = "INFLUXDB_IOX_PERSIST_MEMORY_THRESHOLD_BYTES",
|
||||
action
|
||||
)]
|
||||
pub persist_memory_threshold_bytes: usize,
|
||||
|
||||
|
@ -38,7 +42,8 @@ pub struct IngesterConfig {
|
|||
#[clap(
|
||||
long = "--persist-partition-size-threshold-bytes",
|
||||
env = "INFLUXDB_IOX_PERSIST_PARTITION_SIZE_THRESHOLD_BYTES",
|
||||
default_value = "314572800"
|
||||
default_value = "314572800",
|
||||
action
|
||||
)]
|
||||
pub persist_partition_size_threshold_bytes: usize,
|
||||
|
||||
|
@ -48,7 +53,8 @@ pub struct IngesterConfig {
|
|||
#[clap(
|
||||
long = "--persist-partition-age-threshold-seconds",
|
||||
env = "INFLUXDB_IOX_PERSIST_PARTITION_AGE_THRESHOLD_SECONDS",
|
||||
default_value = "1800"
|
||||
default_value = "1800",
|
||||
action
|
||||
)]
|
||||
pub persist_partition_age_threshold_seconds: u64,
|
||||
|
||||
|
@ -57,7 +63,8 @@ pub struct IngesterConfig {
|
|||
#[clap(
|
||||
long = "--persist-partition-cold-threshold-seconds",
|
||||
env = "INFLUXDB_IOX_PERSIST_PARTITION_COLD_THRESHOLD_SECONDS",
|
||||
default_value = "300"
|
||||
default_value = "300",
|
||||
action
|
||||
)]
|
||||
pub persist_partition_cold_threshold_seconds: u64,
|
||||
|
||||
|
@ -67,7 +74,8 @@ pub struct IngesterConfig {
|
|||
/// write buffer and will start up successfully with the oldest available data.
|
||||
#[clap(
|
||||
long = "--skip-to-oldest-available",
|
||||
env = "INFLUXDB_IOX_SKIP_TO_OLDEST_AVAILABLE"
|
||||
env = "INFLUXDB_IOX_SKIP_TO_OLDEST_AVAILABLE",
|
||||
action
|
||||
)]
|
||||
pub skip_to_oldest_available: bool,
|
||||
|
||||
|
@ -77,7 +85,8 @@ pub struct IngesterConfig {
|
|||
#[clap(
|
||||
long = "--test-flight-do-get-panic",
|
||||
env = "INFLUXDB_IOX_FLIGHT_DO_GET_PANIC",
|
||||
default_value = "0"
|
||||
default_value = "0",
|
||||
action
|
||||
)]
|
||||
pub test_flight_do_get_panic: u64,
|
||||
|
||||
|
@ -86,7 +95,8 @@ pub struct IngesterConfig {
|
|||
#[clap(
|
||||
long = "--concurrent-request-limit",
|
||||
env = "INFLUXDB_IOX_CONCURRENT_REQEST_LIMIT",
|
||||
default_value = "20"
|
||||
default_value = "20",
|
||||
action
|
||||
)]
|
||||
pub concurrent_request_limit: usize,
|
||||
}
|
||||
|
|
|
@ -67,7 +67,8 @@ Possible values (case insensitive):
|
|||
* google: Google Cloud Storage. Must also set `--bucket` and `--google-service-account`.
|
||||
* azure: Microsoft Azure blob storage. Must also set `--bucket`, `--azure-storage-account`,
|
||||
and `--azure-storage-access-key`.
|
||||
"#
|
||||
"#,
|
||||
action
|
||||
)]
|
||||
pub object_store: Option<ObjectStoreType>,
|
||||
|
||||
|
@ -85,11 +86,11 @@ Possible values (case insensitive):
|
|||
/// container you've created in the associated storage account, under
|
||||
/// Blob Service > Containers. Must also set `--azure-storage-account` and
|
||||
/// `--azure-storage-access-key`.
|
||||
#[clap(long = "--bucket", env = "INFLUXDB_IOX_BUCKET")]
|
||||
#[clap(long = "--bucket", env = "INFLUXDB_IOX_BUCKET", action)]
|
||||
pub bucket: Option<String>,
|
||||
|
||||
/// The location InfluxDB IOx will use to store files locally.
|
||||
#[clap(long = "--data-dir", env = "INFLUXDB_IOX_DB_DIR")]
|
||||
#[clap(long = "--data-dir", env = "INFLUXDB_IOX_DB_DIR", action)]
|
||||
pub database_directory: Option<PathBuf>,
|
||||
|
||||
/// When using Amazon S3 as the object store, set this to an access key that
|
||||
|
@ -101,7 +102,7 @@ Possible values (case insensitive):
|
|||
///
|
||||
/// Prefer the environment variable over the command line flag in shared
|
||||
/// environments.
|
||||
#[clap(long = "--aws-access-key-id", env = "AWS_ACCESS_KEY_ID")]
|
||||
#[clap(long = "--aws-access-key-id", env = "AWS_ACCESS_KEY_ID", action)]
|
||||
pub aws_access_key_id: Option<String>,
|
||||
|
||||
/// When using Amazon S3 as the object store, set this to the secret access
|
||||
|
@ -112,7 +113,11 @@ Possible values (case insensitive):
|
|||
///
|
||||
/// Prefer the environment variable over the command line flag in shared
|
||||
/// environments.
|
||||
#[clap(long = "--aws-secret-access-key", env = "AWS_SECRET_ACCESS_KEY")]
|
||||
#[clap(
|
||||
long = "--aws-secret-access-key",
|
||||
env = "AWS_SECRET_ACCESS_KEY",
|
||||
action
|
||||
)]
|
||||
pub aws_secret_access_key: Option<String>,
|
||||
|
||||
/// When using Amazon S3 as the object store, set this to the region
|
||||
|
@ -122,9 +127,10 @@ Possible values (case insensitive):
|
|||
/// Must also set `--object-store=s3`, `--bucket`, `--aws-access-key-id`,
|
||||
/// and `--aws-secret-access-key`.
|
||||
#[clap(
|
||||
long = "--aws-default-region",
|
||||
env = "AWS_DEFAULT_REGION",
|
||||
default_value = FALLBACK_AWS_REGION,
|
||||
long = "--aws-default-region",
|
||||
env = "AWS_DEFAULT_REGION",
|
||||
default_value = FALLBACK_AWS_REGION,
|
||||
action,
|
||||
)]
|
||||
pub aws_default_region: String,
|
||||
|
||||
|
@ -136,7 +142,7 @@ Possible values (case insensitive):
|
|||
///
|
||||
/// Prefer the environment variable over the command line flag in shared
|
||||
/// environments.
|
||||
#[clap(long = "--aws-endpoint", env = "AWS_ENDPOINT")]
|
||||
#[clap(long = "--aws-endpoint", env = "AWS_ENDPOINT", action)]
|
||||
pub aws_endpoint: Option<String>,
|
||||
|
||||
/// When using Amazon S3 as an object store, set this to the session token. This is handy when using a federated
|
||||
|
@ -146,18 +152,22 @@ Possible values (case insensitive):
|
|||
///
|
||||
/// Prefer the environment variable over the command line flag in shared
|
||||
/// environments.
|
||||
#[clap(long = "--aws-session-token", env = "AWS_SESSION_TOKEN")]
|
||||
#[clap(long = "--aws-session-token", env = "AWS_SESSION_TOKEN", action)]
|
||||
pub aws_session_token: Option<String>,
|
||||
|
||||
/// Allow unencrypted HTTP connection to AWS.
|
||||
#[clap(long = "--aws-allow-http", env = "AWS_ALLOW_HTTP")]
|
||||
#[clap(long = "--aws-allow-http", env = "AWS_ALLOW_HTTP", action)]
|
||||
pub aws_allow_http: bool,
|
||||
|
||||
/// When using Google Cloud Storage as the object store, set this to the
|
||||
/// path to the JSON file that contains the Google credentials.
|
||||
///
|
||||
/// Must also set `--object-store=google` and `--bucket`.
|
||||
#[clap(long = "--google-service-account", env = "GOOGLE_SERVICE_ACCOUNT")]
|
||||
#[clap(
|
||||
long = "--google-service-account",
|
||||
env = "GOOGLE_SERVICE_ACCOUNT",
|
||||
action
|
||||
)]
|
||||
pub google_service_account: Option<String>,
|
||||
|
||||
/// When using Microsoft Azure as the object store, set this to the
|
||||
|
@ -165,7 +175,11 @@ Possible values (case insensitive):
|
|||
///
|
||||
/// Must also set `--object-store=azure`, `--bucket`, and
|
||||
/// `--azure-storage-access-key`.
|
||||
#[clap(long = "--azure-storage-account", env = "AZURE_STORAGE_ACCOUNT")]
|
||||
#[clap(
|
||||
long = "--azure-storage-account",
|
||||
env = "AZURE_STORAGE_ACCOUNT",
|
||||
action
|
||||
)]
|
||||
pub azure_storage_account: Option<String>,
|
||||
|
||||
/// When using Microsoft Azure as the object store, set this to one of the
|
||||
|
@ -176,14 +190,19 @@ Possible values (case insensitive):
|
|||
///
|
||||
/// Prefer the environment variable over the command line flag in shared
|
||||
/// environments.
|
||||
#[clap(long = "--azure-storage-access-key", env = "AZURE_STORAGE_ACCESS_KEY")]
|
||||
#[clap(
|
||||
long = "--azure-storage-access-key",
|
||||
env = "AZURE_STORAGE_ACCESS_KEY",
|
||||
action
|
||||
)]
|
||||
pub azure_storage_access_key: Option<String>,
|
||||
|
||||
/// When using a network-based object store, limit the number of connection to this value.
|
||||
#[clap(
|
||||
long = "--object-store-connection-limit",
|
||||
env = "OBJECT_STORE_CONNECTION_LIMIT",
|
||||
default_value = "16"
|
||||
default_value = "16",
|
||||
action
|
||||
)]
|
||||
pub object_store_connection_limit: NonZeroUsize,
|
||||
}
|
||||
|
|
|
@@ -14,7 +14,11 @@ pub struct QuerierConfig
 /// The number of threads to use for queries.
 ///
 /// If not specified, defaults to the number of cores on the system
-#[clap(long = "--num-query-threads", env = "INFLUXDB_IOX_NUM_QUERY_THREADS")]
+#[clap(
+long = "--num-query-threads",
+env = "INFLUXDB_IOX_NUM_QUERY_THREADS",
+action
+)]
 pub num_query_threads: Option<usize>,
 
 /// gRPC address for the querier to talk with the ingester. For
@@ -34,7 +38,8 @@ pub struct QuerierConfig
 long = "--ingester-address",
 env = "INFLUXDB_IOX_INGESTER_ADDRESSES",
 multiple_values = true,
-use_value_delimiter = true
+use_value_delimiter = true,
+action
 )]
 pub ingester_addresses: Vec<String>,
 
@@ -42,7 +47,8 @@ pub struct QuerierConfig
 #[clap(
 long = "--ram-pool-bytes",
 env = "INFLUXDB_IOX_RAM_POOL_BYTES",
-default_value = "1073741824"
+default_value = "1073741824",
+action
 )]
 pub ram_pool_bytes: usize,
 
@@ -50,7 +56,8 @@ pub struct QuerierConfig
 #[clap(
 long = "--max-concurrent-queries",
 env = "INFLUXDB_IOX_MAX_CONCURRENT_QUERIES",
-default_value = "10"
+default_value = "10",
+action
 )]
 pub max_concurrent_queries: usize,
 }

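For the `--ingester-address` list above, `multiple_values` and `use_value_delimiter` stay and only `action` is appended. A sketch of the resulting CLI behaviour under clap 3.2; `ListSketch` and the argv are illustrative, not the real `QuerierConfig`:

```rust
#[derive(Debug, clap::Parser)]
struct ListSketch {
    #[clap(
        long = "--ingester-address",
        multiple_values = true,
        use_value_delimiter = true,
        action
    )]
    ingester_addresses: Vec<String>,
}

fn main() {
    // A single comma-delimited value expands into multiple entries.
    let cfg: ListSketch =
        clap::Parser::try_parse_from(["prog", "--ingester-address", "a:8082,b:8082"]).unwrap();
    assert_eq!(
        cfg.ingester_addresses,
        vec!["a:8082".to_string(), "b:8082".to_string()]
    );
}
```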
@@ -22,17 +22,19 @@ pub struct RunConfig {
 
 /// The address on which IOx will serve HTTP API requests.
 #[clap(
-long = "--api-bind",
-env = "INFLUXDB_IOX_BIND_ADDR",
-default_value = DEFAULT_API_BIND_ADDR,
+long = "--api-bind",
+env = "INFLUXDB_IOX_BIND_ADDR",
+default_value = DEFAULT_API_BIND_ADDR,
+action,
 )]
 pub http_bind_address: SocketAddr,
 
 /// The address on which IOx will serve Storage gRPC API requests.
 #[clap(
-long = "--grpc-bind",
-env = "INFLUXDB_IOX_GRPC_BIND_ADDR",
-default_value = DEFAULT_GRPC_BIND_ADDR,
+long = "--grpc-bind",
+env = "INFLUXDB_IOX_GRPC_BIND_ADDR",
+default_value = DEFAULT_GRPC_BIND_ADDR,
+action,
 )]
 pub grpc_bind_address: SocketAddr,
 
@@ -40,7 +42,8 @@ pub struct RunConfig {
 #[clap(
 long = "--max-http-request-size",
 env = "INFLUXDB_IOX_MAX_HTTP_REQUEST_SIZE",
-default_value = "10485760" // 10 MiB
+default_value = "10485760", // 10 MiB
+action,
 )]
 pub max_http_request_size: usize,
 
@ -13,18 +13,27 @@ pub struct WriteBufferConfig {
|
|||
/// The type of write buffer to use.
|
||||
///
|
||||
/// Valid options are: file, kafka
|
||||
#[clap(long = "--write-buffer", env = "INFLUXDB_IOX_WRITE_BUFFER_TYPE")]
|
||||
#[clap(
|
||||
long = "--write-buffer",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_TYPE",
|
||||
action
|
||||
)]
|
||||
pub(crate) type_: String,
|
||||
|
||||
/// The address to the write buffer.
|
||||
#[clap(long = "--write-buffer-addr", env = "INFLUXDB_IOX_WRITE_BUFFER_ADDR")]
|
||||
#[clap(
|
||||
long = "--write-buffer-addr",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_ADDR",
|
||||
action
|
||||
)]
|
||||
pub(crate) connection_string: String,
|
||||
|
||||
/// Write buffer topic/database that should be used.
|
||||
#[clap(
|
||||
long = "--write-buffer-topic",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_TOPIC",
|
||||
default_value = "iox-shared"
|
||||
default_value = "iox-shared",
|
||||
action
|
||||
)]
|
||||
pub(crate) topic: String,
|
||||
|
||||
|
@ -41,14 +50,16 @@ pub struct WriteBufferConfig {
|
|||
env = "INFLUXDB_IOX_WRITE_BUFFER_CONNECTION_CONFIG",
|
||||
default_value = "",
|
||||
multiple_values = true,
|
||||
use_value_delimiter = true
|
||||
use_value_delimiter = true,
|
||||
action
|
||||
)]
|
||||
pub(crate) connection_config: Vec<String>,
|
||||
|
||||
/// The number of topics to create automatically, if any. Default is to not create any topics.
|
||||
#[clap(
|
||||
long = "--write-buffer-auto-create-topics",
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_AUTO_CREATE_TOPICS"
|
||||
env = "INFLUXDB_IOX_WRITE_BUFFER_AUTO_CREATE_TOPICS",
|
||||
action
|
||||
)]
|
||||
pub(crate) auto_create_topics: Option<NonZeroU32>,
|
||||
}
|
||||
|
|
|
@@ -30,6 +30,7 @@ struct Update {
 catalog_dsn: CatalogDsnConfig,
 
 /// The name of the topic
+#[clap(action)]
 db_name: String,
 }
 
@@ -23,7 +23,8 @@ pub struct Config {
 /// Get the schema of a namespace
 #[derive(Debug, clap::Parser)]
 struct Get {
-// The name of the namespace for which you want to fetch the schema
+/// The name of the namespace for which you want to fetch the schema
+#[clap(action)]
 namespace: String,
 }
 
@@ -21,13 +21,15 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
 #[derive(Debug, clap::Parser)]
 pub struct Config {
 /// The IOx namespace to query
+#[clap(action)]
 namespace: String,
 
 /// The query to run, in SQL format
+#[clap(action)]
 query: String,
 
 /// Optional format ('pretty', 'json', or 'csv')
-#[clap(short, long, default_value = "pretty")]
+#[clap(short, long, default_value = "pretty", action)]
 format: String,
 }
 
@ -27,22 +27,29 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
|
|||
#[derive(Debug, clap::Parser)]
|
||||
pub struct Config {
|
||||
/// The IOx namespace to query
|
||||
#[clap(action)]
|
||||
namespace: String,
|
||||
|
||||
/// The table for which to retrieve data
|
||||
#[clap(action)]
|
||||
table: String,
|
||||
|
||||
/// The columns to request
|
||||
#[clap(long = "--columns", multiple_values = true, use_value_delimiter = true)]
|
||||
#[clap(
|
||||
long = "--columns",
|
||||
multiple_values = true,
|
||||
use_value_delimiter = true,
|
||||
action
|
||||
)]
|
||||
columns: Vec<String>,
|
||||
|
||||
/// Predicate in base64 protobuf encoded form.
|
||||
/// (logged on error)
|
||||
#[clap(long = "--predicate-base64")]
|
||||
#[clap(long = "--predicate-base64", action)]
|
||||
predicate_base64: Option<String>,
|
||||
|
||||
/// Optional format ('pretty', 'json', or 'csv')
|
||||
#[clap(short, long, default_value = "pretty")]
|
||||
#[clap(short, long, default_value = "pretty", action)]
|
||||
format: String,
|
||||
}
|
||||
|
||||
|
|
|
@@ -73,6 +73,7 @@ pub struct Config {
 #[derive(Debug, clap::Parser)]
 struct Show {
 /// The id of the partition
+#[clap(action)]
 id: i64,
 }
 
@@ -86,12 +87,15 @@ struct Pull {
 object_store: ObjectStoreConfig,
 
 /// The namespace we're getting the partition from
+#[clap(action)]
 namespace: String,
 
 /// The table name
+#[clap(action)]
 table: String,
 
 /// The partition key
+#[clap(action)]
 partition_key: String,
 }
 
@@ -30,8 +30,11 @@ pub struct Config {
 #[derive(Debug, clap::Parser)]
 struct Get {
 /// The object store uuid of the parquet file
+#[clap(action)]
 uuid: String,
+
 /// The filename to write the data to
+#[clap(action)]
 file_name: String,
 }
 
@ -154,17 +154,18 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--max-http-request-size",
|
||||
env = "INFLUXDB_IOX_MAX_HTTP_REQUEST_SIZE",
|
||||
default_value = "10485760" // 10 MiB
|
||||
default_value = "10485760", // 10 MiB
|
||||
action,
|
||||
)]
|
||||
pub max_http_request_size: usize,
|
||||
|
||||
/// The location InfluxDB IOx will use to store files locally. If not specified, will run in
|
||||
/// ephemeral mode.
|
||||
#[clap(long = "--data-dir", env = "INFLUXDB_IOX_DB_DIR")]
|
||||
#[clap(long = "--data-dir", env = "INFLUXDB_IOX_DB_DIR", action)]
|
||||
pub database_directory: Option<PathBuf>,
|
||||
|
||||
/// Postgres connection string. If not specified, will use an in-memory catalog.
|
||||
#[clap(long = "--catalog-dsn", env = "INFLUXDB_IOX_CATALOG_DSN")]
|
||||
#[clap(long = "--catalog-dsn", env = "INFLUXDB_IOX_CATALOG_DSN", action)]
|
||||
pub dsn: Option<String>,
|
||||
|
||||
/// Schema name for PostgreSQL-based catalogs.
|
||||
|
@ -172,6 +173,7 @@ pub struct Config {
|
|||
long = "--catalog-postgres-schema-name",
|
||||
env = "INFLUXDB_IOX_CATALOG_POSTGRES_SCHEMA_NAME",
|
||||
default_value = iox_catalog::postgres::PostgresConnectionOptions::DEFAULT_SCHEMA_NAME,
|
||||
action,
|
||||
)]
|
||||
pub postgres_schema_name: String,
|
||||
|
||||
|
@ -182,7 +184,8 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--pause-ingest-size-bytes",
|
||||
env = "INFLUXDB_IOX_PAUSE_INGEST_SIZE_BYTES",
|
||||
default_value = "107374182400"
|
||||
default_value = "107374182400",
|
||||
action
|
||||
)]
|
||||
pub pause_ingest_size_bytes: usize,
|
||||
|
||||
|
@ -194,7 +197,8 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--persist-memory-threshold-bytes",
|
||||
env = "INFLUXDB_IOX_PERSIST_MEMORY_THRESHOLD_BYTES",
|
||||
default_value = "1073741824"
|
||||
default_value = "1073741824",
|
||||
action
|
||||
)]
|
||||
pub persist_memory_threshold_bytes: usize,
|
||||
|
||||
|
@ -203,7 +207,8 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--persist-partition-size-threshold-bytes",
|
||||
env = "INFLUXDB_IOX_PERSIST_PARTITION_SIZE_THRESHOLD_BYTES",
|
||||
default_value = "314572800"
|
||||
default_value = "314572800",
|
||||
action
|
||||
)]
|
||||
pub persist_partition_size_threshold_bytes: usize,
|
||||
|
||||
|
@ -214,7 +219,8 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--persist-partition-age-threshold-seconds",
|
||||
env = "INFLUXDB_IOX_PERSIST_PARTITION_AGE_THRESHOLD_SECONDS",
|
||||
default_value = "1800"
|
||||
default_value = "1800",
|
||||
action
|
||||
)]
|
||||
pub persist_partition_age_threshold_seconds: u64,
|
||||
|
||||
|
@ -223,51 +229,57 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--persist-partition-cold-threshold-seconds",
|
||||
env = "INFLUXDB_IOX_PERSIST_PARTITION_COLD_THRESHOLD_SECONDS",
|
||||
default_value = "300"
|
||||
default_value = "300",
|
||||
action
|
||||
)]
|
||||
pub persist_partition_cold_threshold_seconds: u64,
|
||||
|
||||
/// The address on which IOx will serve Router HTTP API requests
|
||||
#[clap(
|
||||
long = "--router-http-bind",
|
||||
// by default, write API requests go to router
|
||||
alias = "api-bind",
|
||||
env = "INFLUXDB_IOX_ROUTER_HTTP_BIND_ADDR",
|
||||
default_value = DEFAULT_ROUTER_HTTP_BIND_ADDR,
|
||||
long = "--router-http-bind",
|
||||
// by default, write API requests go to router
|
||||
alias = "api-bind",
|
||||
env = "INFLUXDB_IOX_ROUTER_HTTP_BIND_ADDR",
|
||||
default_value = DEFAULT_ROUTER_HTTP_BIND_ADDR,
|
||||
action,
|
||||
)]
|
||||
pub router_http_bind_address: SocketAddr,
|
||||
|
||||
/// The address on which IOx will serve Router gRPC API requests
|
||||
#[clap(
|
||||
long = "--router-grpc-bind",
|
||||
env = "INFLUXDB_IOX_ROUTER_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_ROUTER_GRPC_BIND_ADDR,
|
||||
long = "--router-grpc-bind",
|
||||
env = "INFLUXDB_IOX_ROUTER_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_ROUTER_GRPC_BIND_ADDR,
|
||||
action,
|
||||
)]
|
||||
pub router_grpc_bind_address: SocketAddr,
|
||||
|
||||
/// The address on which IOx will serve Querier gRPC API requests
|
||||
#[clap(
|
||||
long = "--querier-grpc-bind",
|
||||
// by default, grpc requests go to querier
|
||||
alias = "grpc-bind",
|
||||
env = "INFLUXDB_IOX_QUERIER_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_QUERIER_GRPC_BIND_ADDR,
|
||||
long = "--querier-grpc-bind",
|
||||
// by default, grpc requests go to querier
|
||||
alias = "grpc-bind",
|
||||
env = "INFLUXDB_IOX_QUERIER_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_QUERIER_GRPC_BIND_ADDR,
|
||||
action,
|
||||
)]
|
||||
pub querier_grpc_bind_address: SocketAddr,
|
||||
|
||||
/// The address on which IOx will serve Router Ingester API requests
|
||||
#[clap(
|
||||
long = "--ingester-grpc-bind",
|
||||
env = "INFLUXDB_IOX_INGESTER_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_INGESTER_GRPC_BIND_ADDR,
|
||||
long = "--ingester-grpc-bind",
|
||||
env = "INFLUXDB_IOX_INGESTER_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_INGESTER_GRPC_BIND_ADDR,
|
||||
action,
|
||||
)]
|
||||
pub ingester_grpc_bind_address: SocketAddr,
|
||||
|
||||
/// The address on which IOx will serve Router Compactor API requests
|
||||
#[clap(
|
||||
long = "--compactor-grpc-bind",
|
||||
env = "INFLUXDB_IOX_COMPACTOR_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_COMPACTOR_GRPC_BIND_ADDR,
|
||||
long = "--compactor-grpc-bind",
|
||||
env = "INFLUXDB_IOX_COMPACTOR_GRPC_BIND_ADDR",
|
||||
default_value = DEFAULT_COMPACTOR_GRPC_BIND_ADDR,
|
||||
action,
|
||||
)]
|
||||
pub compactor_grpc_bind_address: SocketAddr,
|
||||
|
||||
|
@ -275,7 +287,8 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--querier-ram-pool-bytes",
|
||||
env = "INFLUXDB_IOX_QUERIER_RAM_POOL_BYTES",
|
||||
default_value = "1073741824"
|
||||
default_value = "1073741824",
|
||||
action
|
||||
)]
|
||||
pub querier_ram_pool_bytes: usize,
|
||||
|
||||
|
@ -283,7 +296,8 @@ pub struct Config {
|
|||
#[clap(
|
||||
long = "--querier-max-concurrent-queries",
|
||||
env = "INFLUXDB_IOX_QUERIER_MAX_CONCURRENT_QUERIES",
|
||||
default_value = "10"
|
||||
default_value = "10",
|
||||
action
|
||||
)]
|
||||
pub querier_max_concurrent_queries: usize,
|
||||
}
|
||||
|
|
|
@@ -68,7 +68,8 @@ pub struct Config {
 #[clap(
 long = "--query-exec-thread-count",
 env = "INFLUXDB_IOX_QUERY_EXEC_THREAD_COUNT",
-default_value = "4"
+default_value = "4",
+action
 )]
 pub query_exec_thread_count: usize,
 }
 
@@ -69,7 +69,8 @@ pub struct Config {
 #[clap(
 long = "--query-exec-thread-count",
 env = "INFLUXDB_IOX_QUERY_EXEC_THREAD_COUNT",
-default_value = "4"
+default_value = "4",
+action
 )]
 pub query_exec_thread_count: usize,
 }
 
@@ -66,7 +66,8 @@ pub struct Config {
 #[clap(
 long = "--query-pool",
 env = "INFLUXDB_IOX_QUERY_POOL_NAME",
-default_value = "iox-shared"
+default_value = "iox-shared",
+action
 )]
 pub(crate) query_pool_name: String,
 
@@ -82,7 +83,8 @@ pub struct Config {
 #[clap(
 long = "--max-http-requests",
 env = "INFLUXDB_IOX_MAX_HTTP_REQUESTS",
-default_value = "200"
+default_value = "200",
+action
 )]
 pub(crate) http_request_limit: usize,
 }
 
@@ -47,7 +47,8 @@ pub struct Config {
 long = "--test-action",
 env = "IOX_TEST_ACTION",
 default_value = "None",
-ignore_case = true
+ignore_case = true,
+action
 )]
 test_action: TestAction,
 }
 
@@ -22,7 +22,7 @@ pub struct Config {
 /// `SET FORMAT` command
 ///
 /// Optional format ('pretty', 'json', or 'csv')
-#[clap(short, long, default_value = "pretty")]
+#[clap(short, long, default_value = "pretty", action)]
 format: String,
 }
 
@ -48,22 +48,44 @@ pub struct Config {
|
|||
command: Command,
|
||||
|
||||
/// The name of the database
|
||||
#[clap(parse(try_from_str = parse_db_name))]
|
||||
#[clap(
|
||||
value_parser = parse_db_name,
|
||||
)]
|
||||
db_name: OrgAndBucket,
|
||||
|
||||
/// The requested start time (inclusive) of the time-range (also accepts RFC3339 format).
|
||||
#[clap(global = true, long, default_value = "-9223372036854775806", parse(try_from_str = parse_range))]
|
||||
#[clap(
|
||||
global = true,
|
||||
long,
|
||||
default_value = "-9223372036854775806",
|
||||
value_parser = parse_range,
|
||||
)]
|
||||
pub start: i64,
|
||||
|
||||
/// The requested stop time (exclusive) of the time-range (also accepts RFC3339 format).
|
||||
#[clap(global = true, long, default_value = "9223372036854775806", parse(try_from_str = parse_range))]
|
||||
#[clap(
|
||||
global = true,
|
||||
long,
|
||||
default_value = "9223372036854775806",
|
||||
value_parser = parse_range,
|
||||
)]
|
||||
pub stop: i64,
|
||||
|
||||
/// A predicate to filter results by. Effectively InfluxQL predicate format (see examples).
|
||||
#[clap(global = true, long, default_value = "", parse(try_from_str = parse_predicate))]
|
||||
#[clap(
|
||||
global = true,
|
||||
long,
|
||||
default_value = "",
|
||||
value_parser = parse_predicate,
|
||||
)]
|
||||
pub predicate: Predicate,
|
||||
|
||||
#[clap(global = true, long, default_value = "pretty", parse(try_from_str = parse_format))]
|
||||
#[clap(
|
||||
global = true,
|
||||
long,
|
||||
default_value = "pretty",
|
||||
value_parser = parse_format,
|
||||
)]
|
||||
pub format: Format,
|
||||
}
|
||||
|
||||
|
@ -132,7 +154,7 @@ fn parse_format(format: &str) -> Result<Format, ParseError> {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, clap::Parser)]
|
||||
#[derive(Clone, Copy, Debug, clap::Parser)]
|
||||
pub enum Format {
|
||||
Pretty,
|
||||
Quiet,
|
||||
|
@ -149,18 +171,28 @@ enum Command {
|
|||
|
||||
#[derive(Debug, clap::Parser)]
|
||||
struct MeasurementFields {
|
||||
#[clap(action)]
|
||||
measurement: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, clap::Parser)]
|
||||
struct ReadWindowAggregate {
|
||||
#[clap(long, parse(try_from_str = humantime::parse_duration))]
|
||||
#[clap(
|
||||
long,
|
||||
value_parser = humantime::parse_duration,
|
||||
)]
|
||||
window_every: Duration,
|
||||
|
||||
#[clap(long, parse(try_from_str = humantime::parse_duration))]
|
||||
#[clap(
|
||||
long,
|
||||
value_parser = humantime::parse_duration,
|
||||
)]
|
||||
offset: Duration,
|
||||
|
||||
#[clap(long, parse(try_from_str = parse_aggregate))]
|
||||
#[clap(
|
||||
long,
|
||||
value_parser = parse_aggregate,
|
||||
)]
|
||||
aggregate: Vec<AggregateType>,
|
||||
}
|
||||
|
||||
|
@ -182,6 +214,7 @@ fn parse_aggregate(aggs: &str) -> Result<AggregateType, ParseError> {
|
|||
#[derive(Debug, clap::Parser)]
|
||||
struct TagValues {
|
||||
/// The tag key value to interrogate for tag values.
|
||||
#[clap(action)]
|
||||
tag_key: String,
|
||||
}
|
||||
|
||||
|
|
|
@@ -22,9 +22,11 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
 #[derive(Debug, clap::Parser)]
 pub struct Config {
 /// The name of the database
+#[clap(action)]
 name: String,
 
 /// File with data to load. Currently supported formats are .lp
+#[clap(action)]
 file_name: PathBuf,
 }
 
@ -103,29 +103,35 @@ struct Config {
|
|||
long,
|
||||
global = true,
|
||||
env = "IOX_ADDR",
|
||||
default_value = "http://127.0.0.1:8082"
|
||||
default_value = "http://127.0.0.1:8082",
|
||||
action
|
||||
)]
|
||||
host: String,
|
||||
|
||||
/// Additional headers to add to CLI requests
|
||||
///
|
||||
/// Values should be key value pairs separated by ':'
|
||||
#[clap(long, global = true)]
|
||||
#[clap(long, global = true, action)]
|
||||
header: Vec<KeyValue<http::header::HeaderName, http::HeaderValue>>,
|
||||
|
||||
/// Configure the request timeout for CLI requests
|
||||
#[clap(long, global = true, default_value = "30s", parse(try_from_str = humantime::parse_duration))]
|
||||
#[clap(
|
||||
long,
|
||||
global = true,
|
||||
default_value = "30s",
|
||||
value_parser = humantime::parse_duration,
|
||||
)]
|
||||
rpc_timeout: Duration,
|
||||
|
||||
/// Automatically generate an uber-trace-id header for CLI requests
|
||||
///
|
||||
/// The generated trace ID will be emitted at the beginning of the response.
|
||||
#[clap(long, global = true)]
|
||||
#[clap(long, global = true, action)]
|
||||
gen_trace_id: bool,
|
||||
|
||||
/// Set the maximum number of threads to use. Defaults to the number of
|
||||
/// cores on the system
|
||||
#[clap(long)]
|
||||
#[clap(long, action)]
|
||||
num_threads: Option<usize>,
|
||||
|
||||
/// Supports having all-in-one be the default command.
|
||||
|
|
|
@ -12,15 +12,16 @@
|
|||
|
||||
use chrono::prelude::*;
|
||||
use chrono_english::{parse_date_string, Dialect};
|
||||
use clap::{crate_authors, crate_version, Arg, Command};
|
||||
use iox_data_generator::{specification::DataSpec, write::PointsWriterBuilder};
|
||||
use std::fs::File;
|
||||
use std::io::{self, BufRead};
|
||||
use tracing::info;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let help = r#"IOx data point generator
|
||||
#[derive(clap::Parser)]
|
||||
#[clap(
|
||||
name = "iox_data_generator",
|
||||
about = "IOx data point generator",
|
||||
long_about = r#"IOx data point generator
|
||||
|
||||
Examples:
|
||||
# Generate data points using the specification in `spec.toml` and save in the `lp` directory
|
||||
|
@ -43,130 +44,93 @@ Logging:
|
|||
|
||||
# Enable INFO level logging for all of iox_data_generator
|
||||
RUST_LOG=iox_data_generator=info iox_data_generator -s spec.toml -o lp
|
||||
"#,
|
||||
author,
|
||||
version
|
||||
)]
|
||||
struct Config {
|
||||
/// Path to the specification TOML file describing the data generation
|
||||
#[clap(long, short, action)]
|
||||
specification: String,
|
||||
|
||||
/// Print the generated line protocol from a single sample collection to the terminal
|
||||
#[clap(long, action)]
|
||||
print: bool,
|
||||
|
||||
"#;
|
||||
/// Runs the generation with agents writing to a sink. Useful for quick stress test to see how much resources the generator will take
|
||||
#[clap(long, action)]
|
||||
noop: bool,
|
||||
|
||||
let matches = Command::new(help)
|
||||
.version(crate_version!())
|
||||
.author(crate_authors!())
|
||||
.about("IOx data point generator")
|
||||
.arg(
|
||||
Arg::new("SPECIFICATION")
|
||||
.short('s')
|
||||
.long("spec")
|
||||
.help("Path to the specification TOML file describing the data generation")
|
||||
.takes_value(true)
|
||||
.required(true),
|
||||
)
|
||||
.arg(Arg::new("PRINT")
|
||||
.long("print")
|
||||
.help("Print the generated line protocol from a single sample collection to the terminal")
|
||||
)
|
||||
.arg(Arg::new("NOOP")
|
||||
.long("noop")
|
||||
.help("Runs the generation with agents writing to a sink. Useful for quick stress test to see how much resources the generator will take")
|
||||
)
|
||||
.arg(
|
||||
Arg::new("OUTPUT")
|
||||
.short('o')
|
||||
.long("output")
|
||||
.help("The filename to write line protocol")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("HOST")
|
||||
.short('h')
|
||||
.long("host")
|
||||
.help("The host name part of the API endpoint to write to")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("ORG")
|
||||
.long("org")
|
||||
.help("The organization name to write to")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("BUCKET")
|
||||
.long("bucket")
|
||||
.help("The bucket name to write to")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("DATABASE_LIST")
|
||||
.long("database_list")
|
||||
.help("File name with a list of databases. 1 per line with <org>_<bucket> format")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("TOKEN")
|
||||
.long("token")
|
||||
.help("The API authorization token used for all requests")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("START")
|
||||
.long("start")
|
||||
.help(
|
||||
"The date and time at which to start the timestamps of the generated data. \
|
||||
Can be an exact datetime like `2020-01-01T01:23:45-05:00` or a fuzzy \
|
||||
specification like `1 hour ago`. If not specified, defaults to now.",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("END")
|
||||
.long("end")
|
||||
.help(
|
||||
"The date and time at which to stop the timestamps of the generated data. \
|
||||
Can be an exact datetime like `2020-01-01T01:23:45-05:00` or a fuzzy \
|
||||
specification like `1 hour ago`. If not specified, defaults to now.",
|
||||
)
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(Arg::new("continue").long("continue").help(
|
||||
"Generate live data using the intervals from the spec after generating historical \
|
||||
data. This option has no effect if you specify an end time.",
|
||||
))
|
||||
.arg(
|
||||
Arg::new("batch_size")
|
||||
.long("batch_size")
|
||||
.help("Generate this many samplings to batch into a single API call. Good for sending a bunch of historical data in quickly if paired with a start time from long ago.")
|
||||
.takes_value(true)
|
||||
)
|
||||
.arg(
|
||||
Arg::new("jaeger_debug_header")
|
||||
.long("jaeger_debug_header")
|
||||
.help("Generate jaeger debug header with given key during write")
|
||||
.takes_value(true)
|
||||
)
|
||||
.get_matches();
|
||||
/// The filename to write line protocol
|
||||
#[clap(long, short, action)]
|
||||
output: Option<String>,
|
||||
|
||||
let disable_log_output = matches.is_present("PRINT");
|
||||
if !disable_log_output {
|
||||
/// The host name part of the API endpoint to write to
|
||||
#[clap(long, short, action)]
|
||||
host: Option<String>,
|
||||
|
||||
/// The organization name to write to
|
||||
#[clap(long, action)]
|
||||
org: Option<String>,
|
||||
|
||||
/// The bucket name to write to
|
||||
#[clap(long, action)]
|
||||
bucket: Option<String>,
|
||||
|
||||
/// File name with a list of databases. 1 per line with <org>_<bucket> format
|
||||
#[clap(long, action)]
|
||||
database_list: Option<String>,
|
||||
|
||||
/// The API authorization token used for all requests
|
||||
#[clap(long, action)]
|
||||
token: Option<String>,
|
||||
|
||||
/// The date and time at which to start the timestamps of the generated data.
|
||||
///
|
||||
/// Can be an exact datetime like `2020-01-01T01:23:45-05:00` or a fuzzy
|
||||
/// specification like `1 hour ago`. If not specified, defaults to no.
|
||||
#[clap(long, action)]
|
||||
start: Option<String>,
|
||||
|
||||
/// The date and time at which to stop the timestamps of the generated data.
|
||||
///
|
||||
/// Can be an exact datetime like `2020-01-01T01:23:45-05:00` or a fuzzy
|
||||
/// specification like `1 hour ago`. If not specified, defaults to now.
|
||||
#[clap(long, action)]
|
||||
end: Option<String>,
|
||||
|
||||
/// Generate live data using the intervals from the spec after generating historical data.
|
||||
///
|
||||
/// This option has no effect if you specify an end time.
|
||||
#[clap(long = "continue", action)]
|
||||
do_continue: bool,
|
||||
|
||||
/// Generate this many samplings to batch into a single API call. Good for sending a bunch of historical data in quickly if paired with a start time from long ago.
|
||||
#[clap(long, action, default_value = "1")]
|
||||
batch_size: usize,
|
||||
|
||||
/// Generate jaeger debug header with given key during write
|
||||
#[clap(long, action)]
|
||||
jaeger_debug_header: Option<String>,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let config: Config = clap::Parser::parse();
|
||||
|
||||
if !config.print {
|
||||
tracing_subscriber::fmt::init();
|
||||
}
|
||||
|
||||
let spec_filename = matches
|
||||
.value_of("SPECIFICATION")
|
||||
// This should never fail if clap is working properly
|
||||
.expect("SPECIFICATION is a required argument");
|
||||
|
||||
let execution_start_time = Local::now();
|
||||
|
||||
let start_datetime = datetime_nanoseconds(matches.value_of("START"), execution_start_time);
|
||||
let end_datetime = datetime_nanoseconds(matches.value_of("END"), execution_start_time);
|
||||
let start_datetime = datetime_nanoseconds(config.start.as_deref(), execution_start_time);
|
||||
let end_datetime = datetime_nanoseconds(config.end.as_deref(), execution_start_time);
|
||||
|
||||
let start_display = start_datetime.unwrap_or_else(|| execution_start_time.timestamp_nanos());
|
||||
let end_display = end_datetime.unwrap_or_else(|| execution_start_time.timestamp_nanos());
|
||||
|
||||
let continue_on = matches.is_present("continue");
|
||||
|
||||
let batch_size = matches
|
||||
.value_of("batch_size")
|
||||
.map(|v| v.parse::<usize>().unwrap())
|
||||
.unwrap_or(1);
|
||||
let continue_on = config.do_continue;
|
||||
|
||||
info!(
|
||||
"Starting at {}, ending at {} ({}){}",
|
||||
|
@ -176,32 +140,25 @@ Logging:
|
|||
if continue_on { " then continuing" } else { "" },
|
||||
);
|
||||
|
||||
let data_spec = DataSpec::from_file(spec_filename)?;
|
||||
let data_spec = DataSpec::from_file(&config.specification)?;
|
||||
|
||||
// TODO: parquet output
|
||||
|
||||
let mut points_writer_builder = if let Some(line_protocol_filename) = matches.value_of("OUTPUT")
|
||||
{
|
||||
let mut points_writer_builder = if let Some(line_protocol_filename) = config.output {
|
||||
PointsWriterBuilder::new_file(line_protocol_filename)?
|
||||
} else if let Some(host) = matches.value_of("HOST") {
|
||||
let token = matches
|
||||
.value_of("TOKEN")
|
||||
.expect("--token must be specified");
|
||||
} else if let Some(host) = config.host {
|
||||
let token = config.token.expect("--token must be specified");
|
||||
|
||||
PointsWriterBuilder::new_api(host, token, matches.value_of("jaeger_debug_header")).await?
|
||||
} else if matches.is_present("PRINT") {
|
||||
PointsWriterBuilder::new_api(host, token, config.jaeger_debug_header.as_deref()).await?
|
||||
} else if config.print {
|
||||
PointsWriterBuilder::new_std_out()
|
||||
} else if matches.is_present("NOOP") {
|
||||
} else if config.noop {
|
||||
PointsWriterBuilder::new_no_op(true)
|
||||
} else {
|
||||
panic!("One of --print or --output or --host must be provided.");
|
||||
};
|
||||
|
||||
let buckets = match (
|
||||
matches.value_of("ORG"),
|
||||
matches.value_of("BUCKET"),
|
||||
matches.value_of("DATABASE_LIST"),
|
||||
) {
|
||||
let buckets = match (config.org, config.bucket, config.database_list) {
|
||||
(Some(org), Some(bucket), None) => {
|
||||
vec![format!("{}_{}", org, bucket)]
|
||||
}
|
||||
|
@ -224,14 +181,14 @@ Logging:
|
|||
end_datetime,
|
||||
execution_start_time.timestamp_nanos(),
|
||||
continue_on,
|
||||
batch_size,
|
||||
disable_log_output,
|
||||
config.batch_size,
|
||||
config.print,
|
||||
)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(total_points) => {
|
||||
if !disable_log_output {
|
||||
if !config.print {
|
||||
eprintln!("Submitted {} total points", total_points);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,7 +47,8 @@ pub struct TracingConfig {
|
|||
#[clap(
|
||||
long = "--traces-exporter",
|
||||
env = "TRACES_EXPORTER",
|
||||
default_value = "none"
|
||||
default_value = "none",
|
||||
action
|
||||
)]
|
||||
pub traces_exporter: TracesExporter,
|
||||
|
||||
|
@ -59,7 +60,8 @@ pub struct TracingConfig {
|
|||
#[clap(
|
||||
long = "--traces-exporter-jaeger-agent-host",
|
||||
env = "TRACES_EXPORTER_JAEGER_AGENT_HOST",
|
||||
default_value = "0.0.0.0"
|
||||
default_value = "0.0.0.0",
|
||||
action
|
||||
)]
|
||||
pub traces_exporter_jaeger_agent_host: String,
|
||||
|
||||
|
@ -71,7 +73,8 @@ pub struct TracingConfig {
|
|||
#[clap(
|
||||
long = "--traces-exporter-jaeger-agent-port",
|
||||
env = "TRACES_EXPORTER_JAEGER_AGENT_PORT",
|
||||
default_value = "6831"
|
||||
default_value = "6831",
|
||||
action
|
||||
)]
|
||||
pub traces_exporter_jaeger_agent_port: NonZeroU16,
|
||||
|
||||
|
@ -81,7 +84,8 @@ pub struct TracingConfig {
|
|||
#[clap(
|
||||
long = "--traces-exporter-jaeger-service-name",
|
||||
env = "TRACES_EXPORTER_JAEGER_SERVICE_NAME",
|
||||
default_value = "iox-conductor"
|
||||
default_value = "iox-conductor",
|
||||
action
|
||||
)]
|
||||
pub traces_exporter_jaeger_service_name: String,
|
||||
|
||||
|
@ -91,7 +95,8 @@ pub struct TracingConfig {
|
|||
#[clap(
|
||||
long = "--traces-exporter-jaeger-trace-context-header-name",
|
||||
env = "TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME",
|
||||
default_value = DEFAULT_JAEGER_TRACE_CONTEXT_HEADER_NAME
|
||||
default_value = DEFAULT_JAEGER_TRACE_CONTEXT_HEADER_NAME,
|
||||
action,
|
||||
)]
|
||||
pub traces_jaeger_trace_context_header_name: String,
|
||||
|
||||
|
@ -101,7 +106,8 @@ pub struct TracingConfig {
|
|||
#[clap(
|
||||
long = "--traces-jaeger-debug-name",
|
||||
env = "TRACES_EXPORTER_JAEGER_DEBUG_NAME",
|
||||
default_value = "jaeger-debug-id"
|
||||
default_value = "jaeger-debug-id",
|
||||
action
|
||||
)]
|
||||
pub traces_jaeger_debug_name: String,
|
||||
|
||||
|
@ -114,7 +120,7 @@ pub struct TracingConfig {
|
|||
long = "--traces-jaeger-tags",
|
||||
env = "TRACES_EXPORTER_JAEGER_TAGS",
|
||||
value_delimiter = ',',
|
||||
parse(try_from_str)
|
||||
action
|
||||
)]
|
||||
pub traces_jaeger_tags: Option<Vec<JaegerTag>>,
|
||||
}
|
||||
|
|
|
@@ -23,7 +23,7 @@ pub struct LoggingConfig {
 ///
 /// If None, [`crate::Builder`] sets a default, by default [`crate::Builder::DEFAULT_LOG_FILTER`],
 /// but overrideable with [`crate::Builder::with_default_log_filter`].
-#[clap(long = "--log-filter", env = "LOG_FILTER")]
+#[clap(long = "--log-filter", env = "LOG_FILTER", action)]
 pub log_filter: Option<String>,
 
 /// Logs: filter short-hand
@@ -39,9 +39,8 @@ pub struct LoggingConfig {
 #[clap(
 short = 'v',
 long = "--verbose",
-multiple_occurrences = true,
 takes_value = false,
-parse(from_occurrences)
+action = clap::ArgAction::Count,
 )]
 pub log_verbose_count: u8,
 
@@ -54,7 +53,8 @@ pub struct LoggingConfig {
 long = "--log-destination",
 env = "LOG_DESTINATION",
 default_value = "stdout",
-verbatim_doc_comment
+verbatim_doc_comment,
+action
 )]
 pub log_destination: LogDestination,
 
@@ -93,7 +93,13 @@ pub struct LoggingConfig {
 /// level=info msg="This is an info message" target="logging" location="logfmt/tests/logging.rs:36" time=1612181556329599000
 /// level=debug msg="This is a debug message" target="logging" location="logfmt/tests/logging.rs:37" time=1612181556329618000
 /// level=trace msg="This is a trace message" target="logging" location="logfmt/tests/logging.rs:38" time=1612181556329634000
-#[clap(long = "--log-format", env = "LOG_FORMAT", default_value = "full", verbatim_doc_comment)]
+#[clap(
+long = "--log-format",
+env = "LOG_FORMAT",
+default_value = "full",
+verbatim_doc_comment,
+action,
+)]
 pub log_format: LogFormat,
 }
 
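The `-v` verbosity flag above is the one place where the migration is more than additive: `parse(from_occurrences)` is replaced by `clap::ArgAction::Count`. A hedged sketch of the counting behaviour under clap 3.2, with an illustrative struct rather than the real `LoggingConfig`:

```rust
#[derive(Debug, clap::Parser)]
struct VerbositySketch {
    /// Repeat `-v` to increase verbosity; `-vv` yields 2.
    #[clap(short = 'v', long = "--verbose", action = clap::ArgAction::Count)]
    log_verbose_count: u8,
}

fn main() {
    // Each occurrence of the flag bumps the counter; no value is taken.
    let cfg: VerbositySketch = clap::Parser::try_parse_from(["prog", "-vv"]).unwrap();
    assert_eq!(cfg.log_verbose_count, 2);
}
```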