From 922e48182a2bec531c662929c133ad1719a81340 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 25 Jun 2024 18:25:55 -0500 Subject: [PATCH 01/96] chore(ci): Improve Vale style linting: - Run Vale in a Docker container by running ./.ci/vale/vale.sh - Define vale lint scripts in package.json. Lint added and changed files for products. - Define error-level vale linting in the lint-staged configuration for staged files. - Improve product-specific nomenclature checks (e.g. database vs bucket) - Simplify vocab terms and allowed words lists. - Fixes content errors revealed by vale. --- .ci/vale/styles/Cloud-Dedicated/Branding.yml | 11 +++ .ci/vale/styles/Cloud-Dedicated/v3Schema.yml | 10 ++ .ci/vale/styles/Cloud-Serverless/Branding.yml | 14 +++ .ci/vale/styles/Clustered/Branding.yml | 11 +++ .ci/vale/styles/Clustered/v3Schema.yml | 10 ++ .ci/vale/styles/InfluxDataDocs/Branding.yml | 55 +++-------- .ci/vale/styles/InfluxDataDocs/Spelling.yml | 34 ++----- .../Terms/configuration-terms.txt | 12 --- .../Terms/{influxdb.txt => ignore.txt} | 91 +++++++------------ .../styles/InfluxDataDocs/Terms/telegraf.txt | 1 - .ci/vale/styles/InfluxDataDocs/WordList.yml | 1 + .../vocabularies/Cloud-Dedicated/accept.txt | 2 - .../vocabularies/Cloud-Dedicated/reject.txt | 6 -- .../vocabularies/Cloud-Serverless/accept.txt | 2 - .../vocabularies/Cloud-Serverless/reject.txt | 7 -- .../config/vocabularies/Clustered/accept.txt | 1 - .../config/vocabularies/Clustered/reject.txt | 6 -- .../vocabularies/InfluxDataDocs/accept.txt | 77 ++++++++++++++++ .../vocabularies/InfluxDataDocs/reject.txt | 0 .ci/vale/vale.sh | 28 +++--- .lintstagedrc.mjs | 62 ++++++++++--- .vale.ini | 9 +- compose.yaml | 32 ------- content/influxdb/cloud-dedicated/.vale.ini | 9 +- .../cloud-dedicated/get-started/_index.md | 4 +- content/influxdb/cloud-serverless/.vale.ini | 9 +- content/influxdb/clustered/.vale.ini | 9 +- .../influxdb/clustered/get-started/_index.md | 4 +- package.json | 10 +- 29 files changed, 280 
insertions(+), 247 deletions(-) create mode 100644 .ci/vale/styles/Cloud-Dedicated/Branding.yml create mode 100644 .ci/vale/styles/Cloud-Dedicated/v3Schema.yml create mode 100644 .ci/vale/styles/Cloud-Serverless/Branding.yml create mode 100644 .ci/vale/styles/Clustered/Branding.yml create mode 100644 .ci/vale/styles/Clustered/v3Schema.yml delete mode 100644 .ci/vale/styles/InfluxDataDocs/Terms/configuration-terms.txt rename .ci/vale/styles/InfluxDataDocs/Terms/{influxdb.txt => ignore.txt} (65%) delete mode 100644 .ci/vale/styles/InfluxDataDocs/Terms/telegraf.txt delete mode 100644 .ci/vale/styles/config/vocabularies/Cloud-Dedicated/accept.txt delete mode 100644 .ci/vale/styles/config/vocabularies/Cloud-Dedicated/reject.txt delete mode 100644 .ci/vale/styles/config/vocabularies/Cloud-Serverless/accept.txt delete mode 100644 .ci/vale/styles/config/vocabularies/Cloud-Serverless/reject.txt delete mode 100644 .ci/vale/styles/config/vocabularies/Clustered/accept.txt delete mode 100644 .ci/vale/styles/config/vocabularies/Clustered/reject.txt create mode 100644 .ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt create mode 100644 .ci/vale/styles/config/vocabularies/InfluxDataDocs/reject.txt mode change 100644 => 100755 .ci/vale/vale.sh diff --git a/.ci/vale/styles/Cloud-Dedicated/Branding.yml b/.ci/vale/styles/Cloud-Dedicated/Branding.yml new file mode 100644 index 000000000..b22437fa7 --- /dev/null +++ b/.ci/vale/styles/Cloud-Dedicated/Branding.yml @@ -0,0 +1,11 @@ +extends: substitution +message: Did you mean '%s' instead of '%s' +level: warning +ignorecase: false +# swap maps tokens in form of bad: good + # NOTE: The left-hand (bad) side can match the right-hand (good) side; + # Vale ignores alerts that match the intended form. 
+swap: + 'cloud-serverless|clustered': cloud-dedicated + 'Cloud Serverless|Clustered': Cloud Dedicated + 'API token': database token diff --git a/.ci/vale/styles/Cloud-Dedicated/v3Schema.yml b/.ci/vale/styles/Cloud-Dedicated/v3Schema.yml new file mode 100644 index 000000000..a95342af8 --- /dev/null +++ b/.ci/vale/styles/Cloud-Dedicated/v3Schema.yml @@ -0,0 +1,10 @@ +extends: substitution +message: Did you mean '%s' instead of '%s' +level: warning +ignorecase: false +# swap maps tokens in form of bad: good + # NOTE: The left-hand (bad) side can match the right-hand (good) side; + # Vale ignores alerts that match the intended form. +swap: + '(?i)bucket': database + '(?i)measurement': table \ No newline at end of file diff --git a/.ci/vale/styles/Cloud-Serverless/Branding.yml b/.ci/vale/styles/Cloud-Serverless/Branding.yml new file mode 100644 index 000000000..6834da4e2 --- /dev/null +++ b/.ci/vale/styles/Cloud-Serverless/Branding.yml @@ -0,0 +1,14 @@ +extends: substitution +message: Did you mean '%s' instead of '%s' +level: warning +ignorecase: false +# swap maps tokens in form of bad: good + # NOTE: The left-hand (bad) side can match the right-hand (good) side; + # Vale ignores alerts that match the intended form. +swap: + 'cloud-dedicated|clustered': cloud-serverless + 'Cloud Dedicated|Clustered': Cloud Serverless + '(?i)database token': API token + '(?i)management token': API token + '(?i)database': bucket + '(?i)table': measurement \ No newline at end of file diff --git a/.ci/vale/styles/Clustered/Branding.yml b/.ci/vale/styles/Clustered/Branding.yml new file mode 100644 index 000000000..83439dad9 --- /dev/null +++ b/.ci/vale/styles/Clustered/Branding.yml @@ -0,0 +1,11 @@ +extends: substitution +message: Did you mean '%s' instead of '%s' +level: warning +ignorecase: false +# swap maps tokens in form of bad: good + # NOTE: The left-hand (bad) side can match the right-hand (good) side; + # Vale ignores alerts that match the intended form. 
+swap: + 'cloud-serverless|cloud-dedicated': clustered + 'Cloud Serverless|Cloud Dedicated': Clustered + 'API token': database token diff --git a/.ci/vale/styles/Clustered/v3Schema.yml b/.ci/vale/styles/Clustered/v3Schema.yml new file mode 100644 index 000000000..a95342af8 --- /dev/null +++ b/.ci/vale/styles/Clustered/v3Schema.yml @@ -0,0 +1,10 @@ +extends: substitution +message: Did you mean '%s' instead of '%s' +level: warning +ignorecase: false +# swap maps tokens in form of bad: good + # NOTE: The left-hand (bad) side can match the right-hand (good) side; + # Vale ignores alerts that match the intended form. +swap: + '(?i)bucket': database + '(?i)measurement': table \ No newline at end of file diff --git a/.ci/vale/styles/InfluxDataDocs/Branding.yml b/.ci/vale/styles/InfluxDataDocs/Branding.yml index a63117119..1ecd398b0 100644 --- a/.ci/vale/styles/InfluxDataDocs/Branding.yml +++ b/.ci/vale/styles/InfluxDataDocs/Branding.yml @@ -1,45 +1,20 @@ extends: substitution message: Use '%s' instead of '%s' level: warning -ignorecase: true +ignorecase: false # swap maps tokens in form of bad: good + # NOTE: The left-hand (bad) side can match the right-hand (good) side; + # Vale ignores alerts that match the intended form. swap: - # NOTE: The left-hand (bad) side can match the right-hand (good) side; Vale - # will ignore any alerts that match the intended form. 
- "anaconda": Anaconda - "(?i)api": API - "arrow": Arrow - "authtoken": authToken - "Authtoken": AuthToken - "chronograf": Chronograf - "cli": CLI - "(?i)clockface": Clockface - "the compactor": the Compactor - "data explorer": Data Explorer - "datetime": dateTime - "dedupe": deduplicate - "(?i)executionplan": ExecutionPlan - "fieldkey": fieldKey - "fieldtype": fieldType - "flight": Flight - "(?i)flightquery": FlightQuery - "(?i)FlightSQL": Flight SQL - "/b(?i)influxdata/b": InfluxData - "/w*/b(?i)influxdb": InfluxDB - "(?i)influxql": InfluxQL - "influxer": Influxer - "the ingester": the Ingester - "(?i)iox": v3 - "java[ -]?scripts?": JavaScript - "kapa": Kapacitor - "logicalplan": LogicalPlan - "the object store": the Object store - "a {{% product-name %}}": an {{% product-name %}} - "Pandas": pandas - " parquet": Parquet - "the querier": the Querier - "SQL Alchemy": SQLAlchemy - "superset": Superset - "tagkey": tagKey - "telegraf": Telegraf - "telegraph": Telegraf + 'the compactor': the Compactor + 'dedupe': deduplicate + '/b(?i)influxdata/b': InfluxData + '/w*/b(?i)influxdb': InfluxDB + 'the ingester': the Ingester + '(?i)iox': v3 + 'the object store': the Object store + 'a {{% product-name %}}': an {{% product-name %}} + 'the querier': the Querier + 'SQL Alchemy': SQLAlchemy + 'telegraph': Telegraf + '(?i)vscode': VSCode diff --git a/.ci/vale/styles/InfluxDataDocs/Spelling.yml b/.ci/vale/styles/InfluxDataDocs/Spelling.yml index d7bb17a89..bdce7da2a 100644 --- a/.ci/vale/styles/InfluxDataDocs/Spelling.yml +++ b/.ci/vale/styles/InfluxDataDocs/Spelling.yml @@ -1,36 +1,14 @@ extends: spelling message: "Did you really mean '%s'?" -level: error +level: warning scope: - ~table.header - ~table.cell ignore: - # Located at StylesPath/ignore1.txt - - InfluxDataDocs/Terms/influxdb.txt - - InfluxDataDocs/Terms/configuration-terms.txt +# Ignore the following words. All words are case-insensitive. +# To use case-sensitive matching, use the filters section or vocabulary Terms. 
+ - InfluxDataDocs/Terms/ignore.txt - InfluxDataDocs/Terms/query-functions.txt - - InfluxDataDocs/Terms/telegraf.txt filters: - # Ignore Hugo, layout, and design words. - - 'Flexbox' - - '(?i)frontmatter' - - '(?i)shortcode(s?)' - - '(?i)tooltip(s?)' - # Ignore all words starting with 'py'. - # e.g., 'PyYAML'. - - '[pP]y.*\b' - # Ignore underscore-delimited words. - # e.g., avg_temp - - '\b\w+_\w+\b' - - '\b_\w+\b' - # Ignore SQL variables. - - '(?i)AS \w+' - # Ignore custom words - - '(?i)deduplicat(ion|e|ed|es|ing)' - - '(?i)downsampl(e|ing|ed|es)' - - 'InfluxDB-specific' - - '(?i)repartition(ed|s|ing)' - - '(?i)subcommand(s?)' - - '(?i)union(ing|ed|s)?' - - 'unsignedLong' - - 'US (East|West|Central|North|South|Northeast|Northwest|Southeast|Southwest)' +# Allow product-specific Branding.yml configurations to handle [Ss]erverless while also allowing serverless as a valid dictionary word. + - '[Ss]erverless' diff --git a/.ci/vale/styles/InfluxDataDocs/Terms/configuration-terms.txt b/.ci/vale/styles/InfluxDataDocs/Terms/configuration-terms.txt deleted file mode 100644 index b443d3739..000000000 --- a/.ci/vale/styles/InfluxDataDocs/Terms/configuration-terms.txt +++ /dev/null @@ -1,12 +0,0 @@ -autogen -batchBucket -batchInterval -commentPrefix -destinationBucket -destinationHost -destinationOrg -destinationToken -quoteChar -retentionRules -sourceBucket -src \ No newline at end of file diff --git a/.ci/vale/styles/InfluxDataDocs/Terms/influxdb.txt b/.ci/vale/styles/InfluxDataDocs/Terms/ignore.txt similarity index 65% rename from .ci/vale/styles/InfluxDataDocs/Terms/influxdb.txt rename to .ci/vale/styles/InfluxDataDocs/Terms/ignore.txt index feaac0799..ad13fb4af 100644 --- a/.ci/vale/styles/InfluxDataDocs/Terms/influxdb.txt +++ b/.ci/vale/styles/InfluxDataDocs/Terms/ignore.txt @@ -1,126 +1,97 @@ -api -apis +autogen +boolean +bundler +chronograf +clockface +flexbox +flight +frontmatter +kapacitor +telegraf +unix args authtoken authz -boolean -booleans -bundler -bundlers 
-chronograf -cli -clockface -cloud +callout codeblock compactor -conda -csv +config +crypto dashboarding datagram datasource -datetime +deduplicate deserialize -downsample dotenv +downsample enum -executionplan -fieldkey -fieldtype file_groups -flighquery -Grafana -groupId +fullscreen gzip gzipped homogenous hostname -hostUrl -hostURL -HostURL implementor -implementors -influxctl influxd -influxdata.com -influx3 ingester ingesters -iox -kapacitor -lat -locf -logicalplan logstash lon lookahead lookbehind -metaquery metaqueries +metaquery middleware namespace -noaa -npm -oauth output_ordering -pandas param performant projection protofiles pushdown querier +queryable +quoteChar rearchitect -rearchitected -redoc remediations repartition +retentionRules retention_policy retryable rp serializable serializer -serverless shortcode signout -Splunk -SQLAlchemy +src stderr +stdin stdout subcommand -subcommands subnet subnets subprocessor -subprocessors -subquery subqueries +subquery substring -substrings -superset svg syntaxes -tagkey +tagKey tagset -telegraf -telegraf's tombstoned -tsm +tooltip uint uinteger unescaped ungroup ungrouped -unprocessable -unix +unioned +unioning +unions unmarshal unmarshalled unpackage +unprocessable +unsignedLong upsample upsert -urls -venv -VSCode -WALs -Webpack -xpath -XPath diff --git a/.ci/vale/styles/InfluxDataDocs/Terms/telegraf.txt b/.ci/vale/styles/InfluxDataDocs/Terms/telegraf.txt deleted file mode 100644 index 00140d688..000000000 --- a/.ci/vale/styles/InfluxDataDocs/Terms/telegraf.txt +++ /dev/null @@ -1 +0,0 @@ -[Tt]elegraf diff --git a/.ci/vale/styles/InfluxDataDocs/WordList.yml b/.ci/vale/styles/InfluxDataDocs/WordList.yml index 5d1402467..59ab8922c 100644 --- a/.ci/vale/styles/InfluxDataDocs/WordList.yml +++ b/.ci/vale/styles/InfluxDataDocs/WordList.yml @@ -55,6 +55,7 @@ swap: fewer data: less data file name: filename firewalls: firewall rules + fully qualified: fully-qualified functionality: capability|feature Google account: 
Google Account Google accounts: Google Accounts diff --git a/.ci/vale/styles/config/vocabularies/Cloud-Dedicated/accept.txt b/.ci/vale/styles/config/vocabularies/Cloud-Dedicated/accept.txt deleted file mode 100644 index d0ad5ad23..000000000 --- a/.ci/vale/styles/config/vocabularies/Cloud-Dedicated/accept.txt +++ /dev/null @@ -1,2 +0,0 @@ -cloud-dedicated -Cloud Dedicated \ No newline at end of file diff --git a/.ci/vale/styles/config/vocabularies/Cloud-Dedicated/reject.txt b/.ci/vale/styles/config/vocabularies/Cloud-Dedicated/reject.txt deleted file mode 100644 index 07bbc77dd..000000000 --- a/.ci/vale/styles/config/vocabularies/Cloud-Dedicated/reject.txt +++ /dev/null @@ -1,6 +0,0 @@ -API token -bucket name -Cloud Serverless -cloud-serverless -Clustered -clustered diff --git a/.ci/vale/styles/config/vocabularies/Cloud-Serverless/accept.txt b/.ci/vale/styles/config/vocabularies/Cloud-Serverless/accept.txt deleted file mode 100644 index ca293d0e2..000000000 --- a/.ci/vale/styles/config/vocabularies/Cloud-Serverless/accept.txt +++ /dev/null @@ -1,2 +0,0 @@ -cloud-serverless -Cloud Serverless \ No newline at end of file diff --git a/.ci/vale/styles/config/vocabularies/Cloud-Serverless/reject.txt b/.ci/vale/styles/config/vocabularies/Cloud-Serverless/reject.txt deleted file mode 100644 index e90ca8162..000000000 --- a/.ci/vale/styles/config/vocabularies/Cloud-Serverless/reject.txt +++ /dev/null @@ -1,7 +0,0 @@ -Cloud Dedicated -cloud-dedicated -Clustered -clustered -database name -database token -management token \ No newline at end of file diff --git a/.ci/vale/styles/config/vocabularies/Clustered/accept.txt b/.ci/vale/styles/config/vocabularies/Clustered/accept.txt deleted file mode 100644 index 0a3224ec9..000000000 --- a/.ci/vale/styles/config/vocabularies/Clustered/accept.txt +++ /dev/null @@ -1 +0,0 @@ -clustered diff --git a/.ci/vale/styles/config/vocabularies/Clustered/reject.txt b/.ci/vale/styles/config/vocabularies/Clustered/reject.txt deleted file mode 100644 
index 5bbfb6575..000000000 --- a/.ci/vale/styles/config/vocabularies/Clustered/reject.txt +++ /dev/null @@ -1,6 +0,0 @@ -API token -bucket name -Cloud Dedicated -cloud-dedicated -Cloud Serverless -cloud-serverless diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt new file mode 100644 index 000000000..2d7ef17b4 --- /dev/null +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -0,0 +1,77 @@ +(?i)AS \w+ +(InfluxQL|influxql) +(tsm|TSM) +(xpath|XPath) +APIs? +Anaconda +Apache Superset +Arrow +AuthToken +CLI +CSV +Data Explorer +Dedup +Execd +ExecutionPlan +Flight SQL +FlightQuery +GBs? +Grafana +HostURL +InfluxDB Cloud +InfluxDB OSS +InfluxDB-specific +Influxer +JavaScript +KBs? +LogicalPlan +MBs? +PBs? +Parquet +Redoc +SQLAlchemy +SQLAlchemy +Splunk +[Ss]uperset +TBs? +UI +URLs +US (East|West|Central|North|South|Northeast|Northwest|Southeast|Southwest) +Unix +WALs? +Webpack +[pP]y.*\b +\b\w+_\w+\b +\b_\w+\b +batchBucket +batchInterval +commentPrefix +destinationBucket +destinationHost +destinationOrg +destinationToken +docs-v2 +fieldKey +fieldType +groupId +hostURL +hostUrl +influx3 +influxctl +influxd +influxdata.com +iox +lat +locf +logicalplan +noaa|NOAA +npm|NPM +oauth|OAuth +pandas +quoteChar +retentionRules +sourceBucket +tagKey +v2 +v3 +venv diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/reject.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/reject.txt new file mode 100644 index 000000000..e69de29bb diff --git a/.ci/vale/vale.sh b/.ci/vale/vale.sh old mode 100644 new mode 100755 index 2bf5b71c1..cc3ff3799 --- a/.ci/vale/vale.sh +++ b/.ci/vale/vale.sh @@ -1,16 +1,20 @@ -# Lint cloud-dedicated -docspath=. -contentpath=$docspath/content +#!/bin/bash -# Vale searches for a configuration file (.vale.ini) in the directory of the file being linted, and then in each of its parent directories. 
-# Lint cloud-dedicated -npx vale --output=line --relative --minAlertLevel=error $contentpath/influxdb/cloud-dedicated +# Run Vale to lint files for writing style and consistency -# Lint cloud-serverless -npx vale --config=$contentpath/influxdb/cloud-serverless/.vale.ini --output=line --relative --minAlertLevel=error $contentpath/influxdb/cloud-serverless +# Example usage: -# Lint clustered -npx vale --config=$contentpath/influxdb/clustered/.vale.ini --output=line --relative --minAlertLevel=error $contentpath/influxdb/clustered +# Lint all added and modified files in the cloud-dedicated directory and report suggestions, warnings, and errors. -# Lint telegraf -# npx vale --config=$docspath/.vale.ini --output=line --relative --minAlertLevel=error $contentpath/telegraf +# git diff --name-only --diff-filter=d HEAD | grep "content/influxdb/cloud-dedicated" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=content/influxdb/cloud-dedicated/.vale.ini + +# Lint files provided as arguments +docker run \ + --rm \ + --label tag=influxdata-docs \ + --label stage=lint \ + --mount type=bind,src=$(pwd),dst=/workdir \ + -w /workdir \ + --entrypoint /bin/vale \ + jdkato/vale:latest \ + "$@" \ No newline at end of file diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index 1cd58f178..272981b9c 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -27,7 +27,7 @@ function testStagedContent(paths, productPath) { // This script first checks if there are any tests to run using `pytest --collect-only`. // If there are tests, it runs them; otherwise, it exits with a success code. // Whether tests pass or fail, the container is removed, - // but the CONTENT container will remain until the next run. + // but the CONTENT container and associated volume will remain until the next run. 
`sh -c "docker run --rm --name ${TEST}-collector \ --env-file ${productPath}/.env.test \ --volumes-from ${CONTENT} \ @@ -46,23 +46,55 @@ function testStagedContent(paths, productPath) { ]; } +// Export a lint-staged configuration object. +// Run tests and linters on staged files. export default { - "*.{js,css}": paths => `prettier --write ${paths.join(' ')}`, + "*.{js,css}": paths => `prettier --write ${paths.join(' ')}`, - // Don't let prettier check or write Markdown files for now; - // it indents code blocks within list items, which breaks Hugo's rendering. - // "*.md": paths => `prettier --check ${paths.join(' ')}`, + "*.md": paths => `.ci/vale/vale.sh --config .vale.ini ${paths} --min|| true`, - "content/influxdb/cloud-dedicated/**/*.md": - paths => [...testStagedContent(paths, 'content/influxdb/cloud-dedicated')], - "content/influxdb/cloud-serverless/**/*.md": - paths => [...testStagedContent(paths, 'content/influxdb/cloud-serverless')], - "content/influxdb/clustered/**/*.md": - paths => [...testStagedContent(paths, 'content/influxdb/clustered')], - - // "content/influxdb/cloud-serverless/**/*.md": "docker compose run -T lint --config=content/influxdb/cloud-serverless/.vale.ini --minAlertLevel=error", + "content/influxdb/api-docs/": paths => + `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, - // "content/influxdb/clustered/**/*.md": "docker compose run -T lint --config=content/influxdb/clustered/.vale.ini --minAlertLevel=error", + "content/influxdb/cloud/**/*.md": + paths => [ + `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 'content/influxdb/cloud'), + ], - // "content/influxdb/{cloud,v2,telegraf}/**/*.md": "docker compose run -T lint --config=.vale.ini --minAlertLevel=error" + "content/influxdb/cloud-dedicated/**/*.md": + paths => [ + `.ci/vale/vale.sh --config content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 
'content/influxdb/cloud-dedicated'), + ], + + "content/influxdb/cloud-serverless/**/*.md": + paths => [ + `.ci/vale/vale.sh --config content/influxdb/cloud-serverless/.vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 'content/influxdb/cloud-serverless'), + ], + + "content/influxdb/clustered/**/*.md": + paths => [ + `.ci/vale/vale.sh --config content/influxdb/clustered/.vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 'content/influxdb/clustered'), + ], + + "content/influxdb/v1/**/*.md": + paths => [ + `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 'content/influxdb/v1'), + ], + + "content/influxdb/v2/**/*.md": + paths => [ + `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 'content/influxdb/v2'), + ], + + "content/telegraf/**/*.md": + paths => [ + `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, + ...testStagedContent(paths, 'content/telegraf'), + ], } diff --git a/.vale.ini b/.vale.ini index e6a2552f2..88c1fcd2f 100644 --- a/.vale.ini +++ b/.vale.ini @@ -1,12 +1,15 @@ -StylesPath = ".ci/vale/styles" +StylesPath = .ci/vale/styles MinAlertLevel = warning -Packages = Google, Hugo, write-good +Vocab = InfluxDataDocs + +Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Google, write-good Google.Ellipses = NO Google.Headings = NO -Google.WordList = NO \ No newline at end of file +Google.WordList = NO +Vale.Spelling = NO \ No newline at end of file diff --git a/compose.yaml b/compose.yaml index d0edb5c4d..c61cd8910 100644 --- a/compose.yaml +++ b/compose.yaml @@ -1,39 +1,7 @@ # This is a Docker Compose file for the InfluxData documentation site. ## Run documentation tests for code samples. 
name: influxdata-docs -volumes: - test-content: services: - markdownlint: - image: davidanson/markdownlint-cli2:v0.13.0 - container_name: markdownlint - profiles: - - ci - - lint - volumes: - - type: bind - source: . - target: /workdir - working_dir: /workdir - build: - context: . - vale: - image: jdkato/vale:latest - container_name: vale - profiles: - - ci - - lint - volumes: - - type: bind - source: . - target: /workdir - working_dir: /workdir - entrypoint: ["/bin/vale"] - build: - context: . - dockerfile_inline: | - COPY .ci /src/.ci - COPY **/.vale.ini /src/ ## Run InfluxData documentation with the hugo development server on port 1313. ## For more information about the hugomods/hugo image, see ## https://docker.hugomods.com/docs/development/docker-compose/ diff --git a/content/influxdb/cloud-dedicated/.vale.ini b/content/influxdb/cloud-dedicated/.vale.ini index 9a9120b7e..6d35f96b0 100644 --- a/content/influxdb/cloud-dedicated/.vale.ini +++ b/content/influxdb/cloud-dedicated/.vale.ini @@ -1,14 +1,15 @@ StylesPath = "../../../.ci/vale/styles" -Vocab = Cloud-Dedicated +Vocab = InfluxDataDocs MinAlertLevel = warning -Packages = Google, Hugo, write-good +Packages = Google, write-good, Hugo [*.md] -BasedOnStyles = Vale, InfluxDataDocs, Google, write-good +BasedOnStyles = Vale, InfluxDataDocs, Cloud-Dedicated, Google, write-good Google.Ellipses = NO Google.Headings = NO -Google.WordList = NO \ No newline at end of file +Google.WordList = NO +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb/cloud-dedicated/get-started/_index.md b/content/influxdb/cloud-dedicated/get-started/_index.md index e0ab6569c..621a0952b 100644 --- a/content/influxdb/cloud-dedicated/get-started/_index.md +++ b/content/influxdb/cloud-dedicated/get-started/_index.md @@ -174,9 +174,7 @@ can write data to {{% product-name %}}. ## Authorization **{{% product-name %}} requires authentication** using -[tokens](/influxdb/cloud-dedicated/admin/tokens/). 
- -There are two types of tokens: +one of the following [token](/influxdb/cloud-dedicated/admin/tokens/) types: - **Database token**: A token that grants read and write access to InfluxDB databases. diff --git a/content/influxdb/cloud-serverless/.vale.ini b/content/influxdb/cloud-serverless/.vale.ini index a8c080a2f..191227f55 100644 --- a/content/influxdb/cloud-serverless/.vale.ini +++ b/content/influxdb/cloud-serverless/.vale.ini @@ -1,14 +1,15 @@ StylesPath = "../../../.ci/vale/styles" -Vocab = Cloud-Serverless +Vocab = InfluxDataDocs MinAlertLevel = warning -Packages = Google, Hugo, write-good +Packages = Google, write-good, Hugo [*.md] -BasedOnStyles = Vale, InfluxDataDocs, Google, write-good +BasedOnStyles = Vale, InfluxDataDocs, Cloud-Serverless, Google, write-good Google.Ellipses = NO Google.Headings = NO -Google.WordList = NO \ No newline at end of file +Google.WordList = NO +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb/clustered/.vale.ini b/content/influxdb/clustered/.vale.ini index ba6c62497..2095bfd0c 100644 --- a/content/influxdb/clustered/.vale.ini +++ b/content/influxdb/clustered/.vale.ini @@ -1,14 +1,15 @@ StylesPath = "../../../.ci/vale/styles" -Vocab = Clustered +Vocab = InfluxDataDocs MinAlertLevel = warning -Packages = Google, Hugo, write-good +Packages = Google, write-good, Hugo [*.md] -BasedOnStyles = Vale, InfluxDataDocs, Google, write-good +BasedOnStyles = Vale, InfluxDataDocs, Clustered, Google, write-good Google.Ellipses = NO Google.Headings = NO -Google.WordList = NO \ No newline at end of file +Google.WordList = NO +Vale.Spelling = NO \ No newline at end of file diff --git a/content/influxdb/clustered/get-started/_index.md b/content/influxdb/clustered/get-started/_index.md index f9e04d982..bff1438f9 100644 --- a/content/influxdb/clustered/get-started/_index.md +++ b/content/influxdb/clustered/get-started/_index.md @@ -161,9 +161,7 @@ They use the HTTP API to write data and use InfluxDB's Flight gRPC API to 
query ## Authorization **{{% product-name %}} requires authentication** using -[tokens](/influxdb/clustered/admin/tokens/). - -There are two types of tokens: +one of the following [token](/influxdb/clustered/admin/tokens/) types: - **Database token**: A token that grants read and write access to InfluxDB databases. diff --git a/package.json b/package.json index 60fe94e2c..5e2c28c16 100644 --- a/package.json +++ b/package.json @@ -20,9 +20,15 @@ "js-yaml": "^4.1.0" }, "scripts": { + "lint-cloud": "git diff --name-only --diff-filter=d HEAD | grep \"content/influxdb/cloud\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=.vale.ini", + "lint-clustered": "git diff --name-only --diff-filter=d HEAD | grep \"content/influxdb/clustered\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=content/influxdb/clustered/.vale.ini", + "lint-dedicated": "git diff --name-only --diff-filter=d HEAD | grep \"content/influxdb/cloud-dedicated\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=content/influxdb/cloud-dedicated/.vale.ini", + "lint-serverless": "git diff --name-only --diff-filter=d HEAD | grep \"content/influxdb/cloud-serverless\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=content/influxdb/cloud-serverless/.vale.ini", + "lint-telegraf": "git diff --name-only --diff-filter=d HEAD | grep \"content/telegraf\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=.vale.ini", + "lint-v1": "git diff --name-only --diff-filter=d HEAD | grep \"content/influxdb/v1\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=.vale.ini", + "lint-v2": "git diff --name-only --diff-filter=d HEAD | grep \"content/influxdb/v2\" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=.vale.ini", "prepare": "husky", - "lint-vale": ".ci/vale/vale.sh", - "lint-staged": "lint-staged --relative" + "test": "lint-staged --relative" }, "main": "index.js", "module": "main.js", From ea9b786b83c05dde5da33a5422e7cad70b28a995 Mon Sep 17 
00:00:00 2001 From: Jason Stirnaman Date: Wed, 26 Jun 2024 10:26:52 -0500 Subject: [PATCH 02/96] fix(ci): test content: - Add python replacement for get-started database/bucket name - Fixes race condition when deleting previous content: - Assign each product it's own test content volume instead of sharing a volume. - Delete existing content in the volume before copying test files. - Pros and cons, but I suspect a similar approach will be better when ported to Circle CI. --- .lintstagedrc.mjs | 19 +++++++++++++++++-- .../cloud-dedicated/get-started/query.md | 2 +- .../cloud-serverless/get-started/query.md | 2 +- .../influxdb/clustered/get-started/query.md | 2 +- test/src/prepare-content.sh | 3 +-- 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index 272981b9c..d07d962d6 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -6,14 +6,26 @@ function testStagedContent(paths, productPath) { const TEST = `pytest-${productName}`; return [ - // Remove any existing test container and volume + // Remove existing containers `sh -c "docker rm -f ${CONTENT} || true"`, `sh -c "docker rm -f ${TEST} || true"`, `docker build . 
-f Dockerfile.tests -t influxdata-docs/tests:latest`, + // Remove any existing Docker volume for staged content + `sh -c "docker volume rm -f ${CONTENT} || true"`, + + // Create a Docker volume for product staged content + `sh -c "docker volume create \ + --label tag=influxdata-docs \ + --label stage=test \ + --name ${CONTENT} || true"`, + // Copy staged content to a volume and run the prepare script + // to remove the existing `docker run --name ${CONTENT} + --label tag=influxdata-docs + --label stage=test --mount type=volume,source=staged-content,target=/app/content --mount type=bind,src=./content,dst=/src/content --mount type=bind,src=./static/downloads,dst=/app/data @@ -38,7 +50,10 @@ function testStagedContent(paths, productPath) { echo 'No tests to run.'; \ exit 0; \ else \ - docker run --rm --name ${TEST} \ + docker run --rm \ + --label tag=influxdata-docs \ + --label stage=test \ + --name ${TEST} \ --env-file ${productPath}/.env.test \ --volumes-from ${CONTENT} \ influxdata-docs/pytest --codeblocks --exitfirst ${productPath}/; diff --git a/content/influxdb/cloud-dedicated/get-started/query.md b/content/influxdb/cloud-dedicated/get-started/query.md index b1f8dde0b..522dced59 100644 --- a/content/influxdb/cloud-dedicated/get-started/query.md +++ b/content/influxdb/cloud-dedicated/get-started/query.md @@ -1014,7 +1014,7 @@ _This tutorial assumes using Maven version 3.9, Java version >= 15, and an `infl - The `App`, `Write`, and `Query` classes belong to the `com.influxdbv3` package (your project **groupId**). - `App` defines a `main()` function that calls `Write.writeLineProtocol()` and `Query.querySQL()`. -4. In your terminal or editor, use Maven to to install dependencies and compile the project code--for example: +4. 
In your terminal or editor, use Maven to install dependencies and compile the project code--for example: diff --git a/content/influxdb/cloud-serverless/get-started/query.md b/content/influxdb/cloud-serverless/get-started/query.md index 97495746e..bbd5a56df 100644 --- a/content/influxdb/cloud-serverless/get-started/query.md +++ b/content/influxdb/cloud-serverless/get-started/query.md @@ -1008,7 +1008,7 @@ _This tutorial assumes using Maven version 3.9, Java version >= 15, and an `infl - The `App`, `Write`, and `Query` classes belong to the `com.influxdbv3` package (your project **groupId**). - `App` defines a `main()` function that calls `Write.writeLineProtocol()` and `Query.querySQL()`. -4. In your terminal or editor, use Maven to to install dependencies and compile the project code--for example: +4. In your terminal or editor, use Maven to install dependencies and compile the project code--for example: diff --git a/content/influxdb/clustered/get-started/query.md b/content/influxdb/clustered/get-started/query.md index dc5210227..0f704a444 100644 --- a/content/influxdb/clustered/get-started/query.md +++ b/content/influxdb/clustered/get-started/query.md @@ -1010,7 +1010,7 @@ _This tutorial assumes using Maven version 3.9, Java version >= 15, and an `infl - The `App`, `Write`, and `Query` classes belong to the `com.influxdbv3` package (your project **groupId**). - `App` defines a `main()` function that calls `Write.writeLineProtocol()` and `Query.querySQL()`. -4. In your terminal or editor, use Maven to to install dependencies and compile the project code--for example: +4. 
In your terminal or editor, use Maven to install dependencies and compile the project code--for example: diff --git a/test/src/prepare-content.sh b/test/src/prepare-content.sh index 42ba51e33..6620c5a16 100644 --- a/test/src/prepare-content.sh +++ b/test/src/prepare-content.sh @@ -24,6 +24,7 @@ function substitute_placeholders { s/f"API_TOKEN"/os.getenv("INFLUX_TOKEN")/g; s/f"BUCKET_NAME"/os.getenv("INFLUX_DATABASE")/g; s/f"DATABASE_NAME"/os.getenv("INFLUX_DATABASE")/g; + s/f"get-started"/os.getenv("INFLUX_DATABASE")/g; s|f"{{< influxdb/host >}}"|os.getenv("INFLUX_HOSTNAME")|g; s|f"RETENTION_POLICY_NAME\|RETENTION_POLICY"|"autogen"|g; ' $file @@ -76,8 +77,6 @@ setup() { prepare_tests() { TEST_FILES="$*" - # Remove files from the previous run. - rm -rf "$TEST_CONTENT"/* # Copy the test files to the target directory while preserving the directory structure. for FILE in $TEST_FILES; do # Create the parent directories of the destination file From 9c035fbd4d1ec9515db2cdc012afeb1399ad4597 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 1 Jul 2024 09:52:07 -0500 Subject: [PATCH 03/96] fix(ci): Simplify Vale terms list, support OAuth browser flow in tests - For influxctl OAuth flow, add support for writing auth URLs to a container-shared file monitored by the host during the pre-commit hook. - In prepare-content.sh, add Management API-associated placeholder substitutions. - Update CONTRIBUTING.md with env.test requirements. 
--- .husky/pre-commit | 2 + .lintstagedrc.mjs | 55 +++++---- CONTRIBUTING.md | 173 ++++++++++++++++++++++++++--- Dockerfile.pytest | 10 ++ Dockerfile.tests | 1 - test/.gitignore | 2 + test/README.md | 111 ------------------ test/env.test.example | 10 ++ test/src/conftest.py | 60 ++++++++++ test/src/monitor-container-urls.sh | 23 ++++ test/src/monitor-tests.sh | 13 +++ test/src/prepare-content.sh | 28 +++-- test/src/requirements.txt | 3 + 13 files changed, 324 insertions(+), 167 deletions(-) delete mode 100644 test/README.md create mode 100644 test/env.test.example create mode 100644 test/src/conftest.py create mode 100755 test/src/monitor-container-urls.sh create mode 100755 test/src/monitor-tests.sh diff --git a/.husky/pre-commit b/.husky/pre-commit index e3456b228..6e0acb87e 100644 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -1 +1,3 @@ +./test/src/monitor-tests.sh start npx lint-staged --relative +./test/src/monitor-tests.sh kill diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index d07d962d6..66a00a12f 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -1,6 +1,6 @@ // Lint-staged configuration. This file must export a lint-staged configuration object. -function testStagedContent(paths, productPath) { +function pytestStagedContent(paths, productPath) { const productName = productPath.replace(/\//g, '-'); const CONTENT = `staged-${productName}`; const TEST = `pytest-${productName}`; @@ -26,7 +26,7 @@ function testStagedContent(paths, productPath) { `docker run --name ${CONTENT} --label tag=influxdata-docs --label stage=test - --mount type=volume,source=staged-content,target=/app/content + --mount type=volume,source=${CONTENT},target=/app/content --mount type=bind,src=./content,dst=/src/content --mount type=bind,src=./static/downloads,dst=/app/data influxdata-docs/tests --files "${paths.join(' ')}"`, @@ -36,28 +36,23 @@ function testStagedContent(paths, productPath) { -t influxdata-docs/pytest:latest`, // Run test runners. 
- // This script first checks if there are any tests to run using `pytest --collect-only`. - // If there are tests, it runs them; otherwise, it exits with a success code. + // Uses a pytest plugin to suppress exit code 5 (if no tests are found), + // This avoids needing to "pre-run" test collection in a subshell to check the exit code. + // Instead of the plugin, we could use a placeholder test that always or conditionally passes. // Whether tests pass or fail, the container is removed, // but the CONTENT container and associated volume will remain until the next run. - `sh -c "docker run --rm --name ${TEST}-collector \ + // Note: the "--network host" setting and `host-open` script are used to + // forward influxctl authentication URLs from the container to the host + // where they can be opened and approved in a host browser. + // Allowing "--network host" has security implications and isn't ideal. + `docker run --rm -t \ + --label tag=influxdata-docs \ + --label stage=test \ + --name ${TEST} \ --env-file ${productPath}/.env.test \ --volumes-from ${CONTENT} \ - influxdata-docs/pytest --codeblocks --collect-only \ - ${productPath}/ > /dev/null 2>&1; \ - TEST_COLLECT_EXIT_CODE=$?; \ - if [ $TEST_COLLECT_EXIT_CODE -eq 5 ]; then \ - echo 'No tests to run.'; \ - exit 0; \ - else \ - docker run --rm \ - --label tag=influxdata-docs \ - --label stage=test \ - --name ${TEST} \ - --env-file ${productPath}/.env.test \ - --volumes-from ${CONTENT} \ - influxdata-docs/pytest --codeblocks --exitfirst ${productPath}/; - fi"` + --mount type=bind,src=./test/shared,dst=/shared \ + influxdata-docs/pytest --codeblocks --suppress-no-test-exit-code --exitfirst ${productPath}/`, ]; } @@ -74,42 +69,42 @@ export default { "content/influxdb/cloud/**/*.md": paths => [ `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/influxdb/cloud'), + ...pytestStagedContent(paths, 'content/influxdb/cloud'), ], - 
"content/influxdb/cloud-dedicated/**/*.md": + "content/influxdb/cloud-dedicated/**/*.md": paths => [ - `.ci/vale/vale.sh --config content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/influxdb/cloud-dedicated'), - ], + `.ci/vale/vale.sh --config content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel error ${paths}`, + ...pytestStagedContent(paths, 'content/influxdb/cloud-dedicated'), + ], "content/influxdb/cloud-serverless/**/*.md": paths => [ `.ci/vale/vale.sh --config content/influxdb/cloud-serverless/.vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/influxdb/cloud-serverless'), + ...pytestStagedContent(paths, 'content/influxdb/cloud-serverless'), ], "content/influxdb/clustered/**/*.md": paths => [ `.ci/vale/vale.sh --config content/influxdb/clustered/.vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/influxdb/clustered'), + ...pytestStagedContent(paths, 'content/influxdb/clustered'), ], "content/influxdb/v1/**/*.md": paths => [ `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/influxdb/v1'), + ...pytestStagedContent(paths, 'content/influxdb/v1'), ], "content/influxdb/v2/**/*.md": paths => [ `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/influxdb/v2'), + ...pytestStagedContent(paths, 'content/influxdb/v2'), ], "content/telegraf/**/*.md": paths => [ `.ci/vale/vale.sh --config .vale.ini --minAlertLevel error ${paths}`, - ...testStagedContent(paths, 'content/telegraf'), + ...pytestStagedContent(paths, 'content/telegraf'), ], } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7aecc6bb7..6b93ebb0a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -85,40 +85,179 @@ git commit -m "" --no-verify For more options, see the [Husky documentation](https://typicode.github.io/husky/how-to.html#skipping-git-hooks). 
-### Configure test credentials +### Set up test scripts and credentials -To configure credentials for tests, set the usual InfluxDB environment variables -for each product inside a `content/influxdb//.env.test` file. +To set up your docs-v2 instance to run tests locally, do the following: -The Docker commands in the `.lintstagedrc.mjs` lint-staged configuration load -the `.env.test` as product-specific environment variables. +1. **Set executable permissions on test scripts** in `./test/src`: -**Warning**: To prevent accidentally adding credentials to the docs-v2 repo, + ```sh + chmod +x ./test/src/*.sh + ``` + +2. **Create credentials for tests**: + + - Create databases, buckets, and tokens for the product(s) you're testing. + - If you don't have access to a Clustered instance, you can use your +Cloud Dedicated instance for testing in most cases. To avoid conflicts when + running tests, create separate Cloud Dedicated and Clustered databases. + +3. **Create .env.test**: Copy the `./test/env.test.example` file into each + product directory to test and rename the file as `.env.test`--for example: + + ```sh + ./content/influxdb/cloud-dedicated/.env.test + ``` + +4. Inside each product's `.env.test` file, assign your InfluxDB credentials to + environment variables. + + In addition to the usual `INFLUX_` environment variables, in your + `cloud-dedicated/.env.test` and `clustered/.env.test` files define the + following variables: + + - `ACCOUNT_ID`, `CLUSTER_ID`: You can find these values in your `influxctl` + `config.toml` configuration file. + - `MANAGEMENT_TOKEN`: Use the `influxctl management create` command to generate + a long-lived management token to authenticate Management API requests + + For the full list of variables you'll need to include, see the substitution + patterns in `./test/src/prepare-content.sh`. + + **Warning**: The database you configure in `.env.test` and any written data may +be deleted during test runs. 
+ + **Warning**: To prevent accidentally adding credentials to the docs-v2 repo, Git is configured to ignore `.env*` files. Don't add your `.env.test` files to Git. Consider backing them up on your local machine in case of accidental deletion. +5. For influxctl commands to run in tests, move or copy your `config.toml` file + to the `./test` directory. + ### Pre-commit linting and testing When you try to commit your changes using `git commit` or your editor, the project automatically runs pre-commit checks for spelling, punctuation, and style on your staged files. -The pre-commit hook calls [`lint-staged`](https://github.com/lint-staged/lint-staged) using the configuration in `.lintstagedrc.mjs`. +`.husky/pre-commit` script runs Git pre-commit hook commands, including +[`lint-staged`](https://github.com/lint-staged/lint-staged). -To run `lint-staged` scripts manually (without committing), enter the following -command in your terminal: +The `.lintstagedrc.mjs` lint-staged configuration maps product-specific glob +patterns to lint and test commands and passes a product-specific +`.env.test` file to a test runner Docker container. +The container then loads the `.env` file into the container's environment variables. -```sh -npx lint-staged --relative --verbose -``` +To test or troubleshoot testing and linting scripts and configurations before +committing, choose from the following: + +- To run pre-commit scripts without actually committing, append `exit 1` to the +`.husky/pre-commit` script--for example: + + ```sh + ./test/src/monitor-tests.sh start + npx lint-staged --relative + ./test/src/monitor-tests.sh kill + exit 1 + ``` + + And then run `git commit`. + + The `exit 1` status fails the commit, even if all the tasks succeed. 
+ +- Use `yarn` to run one of the lint or test scripts configured in + `package.json`--for example: + + ```sh + yarn run test + ``` + +- Run `lint-staged` directly and specify options: + + ```sh + npx lint-staged --relative --verbose + ``` The pre-commit linting configuration checks for _error-level_ problems. -An error-level rule violation fails the commit and you must -fix the problems before you can commit your changes. +An error-level rule violation fails the commit and you must do one of the following before you can commit your changes: -If an error doesn't warrant a fix (for example, a term that should be allowed), -you can override the check and try the commit again or you can edit the linter -style rules to permanently allow the content. See **Configure style rules**. +- fix the reported problem in the content + +- edit the linter rules to permanently allow the content. + See **Configure style rules**. + +- temporarily override the hook (using `git commit --no-verify`) + +#### Test shell and python code blocks + +[pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main) extracts code from python and shell Markdown code blocks and executes assertions for the code. +If you don't assert a value (using a Python `assert` statement), `--codeblocks` considers a non-zero exit code to be a failure. + +**Note**: `pytest --codeblocks` uses Python's `subprocess.run()` to execute shell code. + +You can use this to test CLI and interpreter commands, regardless of programming +language, as long as they return standard exit codes. + +To make the documented output of a code block testable, precede it with the +`` tag and **omit the code block language +descriptor**--for example, in your Markdown file: + +##### Example markdown + +```python +print("Hello, world!") +``` + + + +The next code block is treated as an assertion. +If successful, the output is the following: + +``` +Hello, world! 
+``` + +For commands, such as `influxctl` CLI commands, that require launching an +OAuth URL in a browser, wrap the command in a subshell and redirect the output +to `/shared/urls.txt` in the container--for example: + +```sh +# Test the preceding command outside of the code block. +# influxctl authentication requires TTY interaction-- +# output the auth URL to a file that the host can open. +script -c "influxctl user list " \ + /dev/null > /shared/urls.txt +``` + +You probably don't want to display this syntax in the docs, which unfortunately +means you'd need to include the test block separately from the displayed code +block. +To hide it from users, wrap the code block inside an HTML comment. +Pytest-codeblocks will still collect and run the code block. + +##### Mark tests to skip + +pytest-codeblocks has features for skipping tests and marking blocks as failed. +To learn more, see the pytest-codeblocks README and tests. + +#### Troubleshoot tests + +### Pytest collected 0 items + +Potential reasons: + +- See the test discovery options in `pytest.ini`. +- For Python code blocks, use the following delimiter: + + ```python + # Codeblocks runs this block. + ``` + + `pytest --codeblocks` ignores code blocks that use the following: + + ```py + # Codeblocks ignores this block. + ``` ### Vale style linting diff --git a/Dockerfile.pytest b/Dockerfile.pytest index 1eeb122c1..f7bb443c9 100644 --- a/Dockerfile.pytest +++ b/Dockerfile.pytest @@ -26,10 +26,20 @@ ENV PYTHONUNBUFFERED=1 WORKDIR /app +# Create a mock xdg-open script` to prevent the test suite from attempting to open a browser (for example, during influxctl OAuth2 authentication), and instead execute the host-open script. 
+RUN echo '#!/bin/bash' > /usr/local/bin/xdg-open \ + && echo 'echo "$1" > /shared/urls.txt' >> /usr/local/bin/xdg-open \ + && echo 'echo "$1" >> /shared/host_open.log' >> /usr/local/bin/xdg-open \ + && chmod +x /usr/local/bin/xdg-open + # Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't # available as packages in apt-cache, so use pip to download dependencies in a # separate step and use Docker's caching. +# Pytest configuration file. COPY ./test/src/pytest.ini pytest.ini +# Python and Pytest dependencies. COPY ./test/src/requirements.txt requirements.txt +# Pytest fixtures. +COPY ./test/src/conftest.py conftest.py RUN pip install -Ur requirements.txt # Activate the Python virtual environment configured in the Dockerfile. diff --git a/Dockerfile.tests b/Dockerfile.tests index b1bc12e4f..852ce2e04 100644 --- a/Dockerfile.tests +++ b/Dockerfile.tests @@ -1,7 +1,6 @@ # Use the Dockerfile 1.2 syntax to leverage BuildKit features like cache mounts and inline mounts--temporary mounts that are only available during the build step, not at runtime. # syntax=docker/dockerfile:1.2 -# Starting from a Go base image is easier than setting up the Go environment later. FROM python:3.9-slim # Install the necessary packages for the test environment. diff --git a/test/.gitignore b/test/.gitignore index a8aabc891..21065a63c 100644 --- a/test/.gitignore +++ b/test/.gitignore @@ -2,7 +2,9 @@ /Cargo.lock config.toml content +monitor_urls_pid node_modules +shared tmp .config* .env* diff --git a/test/README.md b/test/README.md deleted file mode 100644 index 57582adda..000000000 --- a/test/README.md +++ /dev/null @@ -1,111 +0,0 @@ -Test code blocks in Markdown files. - -This project contains the following: - -- `test.sh`: The primary entrypoint for running tests. - Copies Markdown files to a temporary directory shared with the `test` Docker image and runs the test container. -- `test/run-tests.sh`: The Docker image entrypoint. 
- Substitutes placeholders with environment variables in code blocks. - Passes test files to test runners (for example, `pytest --codeblocks` for Python and shell code samples). -- `compose.yaml` and `Dockerfile`: Docker image for the **test** service that installs test dependencies and passes test files to test runners. - -## Set configuration values - -To set your InfluxDB credentials for testing, create a `.env.` file and add key=value properties--for example, in `.env.serverless` - -```text -INFLUX_HOST=https://us-east-1-1.aws.cloud2.influxdata.com -INFLUX_HOSTNAME=us-east-1-1.aws.cloud2.influxdata.com -INFLUX_TOKEN=5Vz... -INFLUX_ORG=28d... -INFLUX_DATABASE=jason-test-create-bucket -INFLUX_RETENTION_POLICY=test-autogen -``` - -Storing configuration properties in a `.env` (dotenv) file is generally preferable to using environment variables. - -## Build the image - -1. Install Docker for your system. - -2. Build the Docker image. - - ```shell - docker compose build test - ``` - - After editing configuration or files used by the image, re-run the preceding build command. - -## Run tests - -Test code blocks in Markdown files that have changed relative to your git `master` branch: - -```sh -sh test.sh -``` - -Test code blocks in files that match a pattern: - -```sh -sh test.sh ./content/**/*.md -``` - -`test.sh` copies files into `./test/tmp/` for testing and runs the tests in Docker. - -### Test runners - -_Experimental--work in progress_ - -[pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main) extracts code from python and shell Markdown code blocks and executes assertions for the code. -If you don't assert a value, `--codeblocks` considers a non-zero exit code to be a failure. -_Note_: `pytest --codeblocks` uses Python's `subprocess.run()` to execute shell code. 
- -To assert (and display) the expected output of your code, follow the code block with the `` comment tag, and then the expected output in a code block--for example: - - - -```python -print("Hello, world!") -``` - - - -If successful, the output is the following: - -``` -Hello, world! -``` - - - -pytest-codeblocks has features for skipping tests and marking blocks as failed. -To learn more, see the pytest-codeblocks README and tests. - -#### Other tools and ideas for testing code blocks - -The `codedown` NPM package extracts code from Markdown code blocks for each language and -can pipe the output to a test runner for the language. - -`pytest` and `pytest-codeblocks` use the Python `Assertions` module to keep testing overhead low. -Node.js also provides an `Assertions` package. - -The `runmd` NPM package runs `javascript` code blocks in Markdown and generates a new Markdown file with the code block output inserted. - -## Troubleshoot - -### Pytest collected 0 items - -Potential reasons: - -- See the test discovery options in `pytest.ini`. -- For Python code blocks, use the following delimiter: - - ```python - # Codeblocks runs this block. - ``` - - `pytest --codeblocks` ignores code blocks that use the following: - - ```py - # Codeblocks ignores this block. 
- ``` \ No newline at end of file diff --git a/test/env.test.example b/test/env.test.example new file mode 100644 index 000000000..7faf61dd3 --- /dev/null +++ b/test/env.test.example @@ -0,0 +1,10 @@ +ACCOUNT_ID= +CLUSTER_ID= +INFLUX_ORG= +INFLUX_TOKEN= +INFLUX_HOST=https:// +INFLUX_HOSTNAME= +INFLUX_DATABASE=myname-test-dedicated +INFLUX_USER_NAME= +INFLUX_RETENTION_POLICY=test-autogen +MANAGEMENT_TOKEN= \ No newline at end of file diff --git a/test/src/conftest.py b/test/src/conftest.py new file mode 100644 index 000000000..193237ae9 --- /dev/null +++ b/test/src/conftest.py @@ -0,0 +1,60 @@ +# Test setup fixtures + +import pytest +import requests +import os + +def v3_management_api(): + return { + 'url': os.getenv('INFLUX_HOST') + '/api/v0/accounts/' + os.getenv('ACCOUNT_ID') + '/clusters/' + os.getenv('CLUSTER_ID'), + 'headers': { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'Authorization': 'Bearer ' + os.getenv('MANAGEMENT_TOKEN') + } + } + + +@pytest.fixture +def create_v3_token(): + api = v3_management_api() + url = api['url'] + '/databases/' + os.getenv('INFLUX_DATABASE') + headers = api['headers'] + data = { + "description": "v3 read token", + "permissions": [ + { + "action": "read", + "resource": os.getenv('INFLUX_DATABASE') + }, + { + "action": "write", + "resource": os.getenv('INFLUX_DATABASE') + }, + ] + } + response = requests.post(url, data=data, headers=headers) + return response.json()['token'] + +# Example test function using the setup_v3_db fixture: +# def test_setup(setup_v3_db): + # database, token = setup_v3_db + # assert database + # assert token +@pytest.fixture +def set_token(): + try: + token = create_v3_token() + os.environ.update({'INFLUX_TOKEN': token}) + yield token + except Exception as e: + print(e) + assert False + +@pytest.fixture +def delete_v3_database(): + api = v3_management_api() + url = api['url'] + '/databases/' + os.getenv('INFLUX_TEMP_DATABASE') + headers = api['headers'] + response = 
requests.delete(url, headers=headers) + return response.json()['name'] \ No newline at end of file diff --git a/test/src/monitor-container-urls.sh b/test/src/monitor-container-urls.sh new file mode 100755 index 000000000..d9113d1c1 --- /dev/null +++ b/test/src/monitor-container-urls.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +## This script is meant to be run on the host and monitors a file for URLs written by a container. + +# The file to monitor for URLs written by the container. +FILE="./test/shared/urls.txt" +# Define the URL pattern for OAuth2 authorization. +OAUTH_PATTERN='https://auth\.influxdata\.com/activate\?user_code=[A-Z]{1,8}-[A-Z]{1,8}' + +# Loop indefinitely +while true; do + if [ -f "$FILE" ]; then + # Extract an OAuth2 authorization URL from the file + URL=$(grep -Eo "$OAUTH_PATTERN" "$FILE") + if [ "$URL" ]; then + # Open the URL in the default browser + open "$URL" + # Clear the file to indicate the URL has been handled + > "$FILE" + fi + fi + sleep 1 +done diff --git a/test/src/monitor-tests.sh b/test/src/monitor-tests.sh new file mode 100755 index 000000000..f3aa56c5c --- /dev/null +++ b/test/src/monitor-tests.sh @@ -0,0 +1,13 @@ +function start { + ./test/src/monitor-container-urls.sh & echo $! > ./test/monitor_urls_pid +} + +function kill_process { + PID=$(cat ./test/monitor_urls_pid) && kill -9 $PID && rm ./test/monitor_urls_pid +} + +case "$1" in + start) start ;; + kill) kill_process ;; + *) echo "Usage: $0 {start|kill}" ;; +esac \ No newline at end of file diff --git a/test/src/prepare-content.sh b/test/src/prepare-content.sh index 6620c5a16..6490ae585 100644 --- a/test/src/prepare-content.sh +++ b/test/src/prepare-content.sh @@ -20,12 +20,15 @@ function substitute_placeholders { # Use f-strings to identify placeholders in Python while also keeping valid syntax if # the user replaces the value. # Remember to import os for your example code. 
- sed -i 's/f"DATABASE_TOKEN"/os.getenv("INFLUX_TOKEN")/g; + sed -i 's/f"ACCOUNT_ID"/os.getenv("ACCOUNT_ID")/g; s/f"API_TOKEN"/os.getenv("INFLUX_TOKEN")/g; s/f"BUCKET_NAME"/os.getenv("INFLUX_DATABASE")/g; + s/f"CLUSTER_ID"/os.getenv("CLUSTER_ID")/g; s/f"DATABASE_NAME"/os.getenv("INFLUX_DATABASE")/g; + s/f"DATABASE_TOKEN"/os.getenv("INFLUX_TOKEN")/g; s/f"get-started"/os.getenv("INFLUX_DATABASE")/g; s|f"{{< influxdb/host >}}"|os.getenv("INFLUX_HOSTNAME")|g; + s/f"MANAGEMENT_TOKEN"/os.getenv("MANAGEMENT_TOKEN")/g; s|f"RETENTION_POLICY_NAME\|RETENTION_POLICY"|"autogen"|g; ' $file @@ -35,15 +38,24 @@ function substitute_placeholders { s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \ $file - sed -i 's/API_TOKEN/$INFLUX_TOKEN/g; - s/ORG_ID/$INFLUX_ORG/g; - s/DATABASE_TOKEN/$INFLUX_TOKEN/g; - s/--bucket-id BUCKET_ID/--bucket-id $INFLUX_BUCKET_ID/g; - s/BUCKET_NAME/$INFLUX_DATABASE/g; - s/DATABASE_NAME/$INFLUX_DATABASE/g; + sed -i 's|"influxctl database create --retention-period 1y get-started"|"influxctl database create --retention-period 1y $INFLUX_TMP_DATABASE"|g;' \ + $file + + # Replace remaining placeholders with variables. + # If the placeholder is inside of a Python os.getenv() function, don't replace it. + # Note the specific use of double quotes for the os.getenv() arguments here. You'll need to use double quotes in your code samples for this to match. + sed -i '/os.getenv("ACCOUNT_ID")/! s/ACCOUNT_ID/$ACCOUNT_ID/g; + /os.getenv("API_TOKEN")/! s/API_TOKEN/$INFLUX_TOKEN/g; + /os.getenv("BUCKET_ID")/! s/--bucket-id BUCKET_ID/--bucket-id $INFLUX_BUCKET_ID/g; + /os.getenv("BUCKET_NAME")/! s/BUCKET_NAME/$INFLUX_DATABASE/g; + /os.getenv("CLUSTER_ID")/! s/CLUSTER_ID/$CLUSTER_ID/g; + /os.getenv("DATABASE_TOKEN")/! s/DATABASE_TOKEN/$INFLUX_TOKEN/g; + /os.getenv("DATABASE_NAME")/! 
s/DATABASE_NAME/$INFLUX_DATABASE/g; s/--id DBRP_ID/--id $INFLUX_DBRP_ID/g; s/get-started/$INFLUX_DATABASE/g; - s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g; + /os.getenv("MANAGEMENT_TOKEN")/! s/MANAGEMENT_TOKEN/$MANAGEMENT_TOKEN/g; + /os.getenv("ORG_ID")/! s/ORG_ID/$INFLUX_ORG/g; + /os.getenv("RETENTION_POLICY")/! s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g; s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;' \ $file diff --git a/test/src/requirements.txt b/test/src/requirements.txt index f871ed228..7d366d868 100644 --- a/test/src/requirements.txt +++ b/test/src/requirements.txt @@ -4,6 +4,9 @@ pytest-cov>=2.12.1 pytest-codeblocks>=0.16.1 python-dotenv>=1.0.0 pytest-dotenv>=0.5.2 +# Allow pytest to pass if no tests (i.e. testable code blocks) are found. +pytest-custom-exit-code>=0.3.0 +requests>=2.26.0 # Code sample dependencies influxdb3-python @ git+https://github.com/InfluxCommunity/influxdb3-python@v0.5.0 influxdb3-python-cli @ git+https://github.com/InfluxCommunity/influxdb3-python-cli@main From 75f3e996b371b4424f0e866478031ae09347720d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 1 Jul 2024 13:39:58 -0500 Subject: [PATCH 04/96] chore(v3): Improve linting, test influxctl: - Remove date format linting in table cells. Copies Google's DateFormat style config to InfluxDataDocs style with modified scope. - Update styles (vale sync) - Skip database create in tests. - Add example test for table create. - Fix linting errors. - Add TEST_RUN placeholder replacement in prepare-content.sh. 
--- .ci/vale/styles/InfluxDataDocs/DateFormat.yml | 11 + .ci/vale/styles/write-good/Weasel.yml | 198 +----------------- .vale.ini | 1 + content/influxdb/cloud-dedicated/.vale.ini | 1 + .../define-custom-partitions.md | 50 ++++- content/influxdb/cloud-serverless/.vale.ini | 1 + content/influxdb/clustered/.vale.ini | 1 + .../define-custom-partitions.md | 50 ++++- test/src/prepare-content.sh | 3 +- 9 files changed, 119 insertions(+), 197 deletions(-) create mode 100644 .ci/vale/styles/InfluxDataDocs/DateFormat.yml diff --git a/.ci/vale/styles/InfluxDataDocs/DateFormat.yml b/.ci/vale/styles/InfluxDataDocs/DateFormat.yml new file mode 100644 index 000000000..5dfb7810f --- /dev/null +++ b/.ci/vale/styles/InfluxDataDocs/DateFormat.yml @@ -0,0 +1,11 @@ +extends: existence +message: "Use 'July 31, 2016' format, not '%s'." +link: 'https://developers.google.com/style/dates-times' +ignorecase: true +level: error +nonword: true +scope: + - ~table.cell +tokens: + - '\d{1,2}(?:\.|/)\d{1,2}(?:\.|/)\d{4}' + - '\d{1,2} (?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)|May|Jun(?:e)|Jul(?:y)|Aug(?:ust)|Sep(?:tember)?|Oct(?:ober)|Nov(?:ember)?|Dec(?:ember)?) \d{4}' diff --git a/.ci/vale/styles/write-good/Weasel.yml b/.ci/vale/styles/write-good/Weasel.yml index e29391444..d1d90a7bc 100644 --- a/.ci/vale/styles/write-good/Weasel.yml +++ b/.ci/vale/styles/write-good/Weasel.yml @@ -3,205 +3,27 @@ message: "'%s' is a weasel word!" 
ignorecase: true level: warning tokens: - - absolutely - - accidentally - - additionally - - allegedly - - alternatively - - angrily - - anxiously - - approximately - - awkwardly - - badly - - barely - - beautifully - - blindly - - boldly - - bravely - - brightly - - briskly - - bristly - - bubbly - - busily - - calmly - - carefully - - carelessly - - cautiously - - cheerfully - clearly - - closely - - coldly - completely - - consequently - - correctly - - courageously - - crinkly - - cruelly - - crumbly - - cuddly - - currently - - daily - - daringly - - deadly - - definitely - - deliberately - - doubtfully - - dumbly - - eagerly - - early - - easily - - elegantly - - enormously - - enthusiastically - - equally - - especially - - eventually - - exactly - exceedingly - - exclusively + - excellent - extremely - fairly - - faithfully - - fatally - - fiercely - - finally - - fondly - - few - - foolishly - - fortunately - - frankly - - frantically - - generously - - gently - - giggly - - gladly - - gracefully - - greedily - - happily - - hardly - - hastily - - healthily - - heartily - - helpfully - - honestly - - hourly - - hungrily - - hurriedly - - immediately - - impatiently - - inadequately - - ingeniously - - innocently - - inquisitively + - huge - interestingly - - irritably - - jiggly - - joyously - - justly - - kindly + - is a number - largely - - lately - - lazily - - likely - - literally - - lonely - - loosely - - loudly - - loudly - - luckily - - madly - - many - - mentally - - mildly - - monthly - - mortally - mostly - - mysteriously - - neatly - - nervously - - nightly - - noisily - - normally - - obediently - - occasionally - - only - - openly - - painfully - - particularly - - patiently - - perfectly - - politely - - poorly - - powerfully - - presumably - - previously - - promptly - - punctually - - quarterly - - quickly - - quietly - - rapidly - - rarely - - really - - recently - - recklessly - - regularly - - remarkably + - obviously + - quite - 
relatively - - reluctantly - - repeatedly - - rightfully - - roughly - - rudely - - sadly - - safely - - selfishly - - sensibly - - seriously - - sharply - - shortly - - shyly + - remarkably + - several - significantly - - silently - - simply - - sleepily - - slowly - - smartly - - smelly - - smoothly - - softly - - solemnly - - sparkly - - speedily - - stealthily - - sternly - - stupidly - substantially - - successfully - - suddenly - surprisingly - - suspiciously - - swiftly - - tenderly - - tensely - - thoughtfully - - tightly - - timely - - truthfully - - unexpectedly - - unfortunately + - tiny - usually + - various + - vast - very - - victoriously - - violently - - vivaciously - - warmly - - waverly - - weakly - - wearily - - weekly - - wildly - - wisely - - worldly - - wrinkly - - yearly diff --git a/.vale.ini b/.vale.ini index 88c1fcd2f..8d4262290 100644 --- a/.vale.ini +++ b/.vale.ini @@ -9,6 +9,7 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Google, write-good +Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO diff --git a/content/influxdb/cloud-dedicated/.vale.ini b/content/influxdb/cloud-dedicated/.vale.ini index 6d35f96b0..8973c692c 100644 --- a/content/influxdb/cloud-dedicated/.vale.ini +++ b/content/influxdb/cloud-dedicated/.vale.ini @@ -9,6 +9,7 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Cloud-Dedicated, Google, write-good +Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO diff --git a/content/influxdb/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md b/content/influxdb/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md index 7974c595c..46129a41b 100644 --- a/content/influxdb/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md +++ b/content/influxdb/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md @@ -33,7 +33,7 @@ table. 
#### Partition templates can only be applied on create You can only apply a partition template when creating a database or table. -There is no way to update a partition template on an existing resource. +You can't update a partition template on an existing resource. {{% /note %}} Use the following command flags to identify @@ -71,6 +71,9 @@ The following example creates a new `example-db` database and applies a partitio template that partitions by distinct values of two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag, and by week using the time format `%Y wk:%W`: + + + ```sh influxctl database create \ --template-tag room \ @@ -82,21 +85,60 @@ influxctl database create \ ## Create a table with a custom partition template -The following example creates a new `example-table` table in the `example-db` +The following example creates a new `example-table` table in the specified database and applies a partition template that partitions by distinct values of two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag, and by month using the time format `%Y-%m`: + + + +{{% code-placeholders "DATABASE_NAME" %}} + ```sh influxctl table create \ --template-tag room \ --template-tag sensor-type \ --template-tag-bucket customerID,500 \ --template-timeformat '%Y-%m' \ - example-db \ + DATABASE_NAME \ example-table ``` +{{% /code-placeholders %}} + +Replace the following in your command: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/cloud-dedicated/admin/databases/) + + + ## Example partition templates Given the following [line protocol](/influxdb/cloud-dedicated/reference/syntax/line-protocol/) @@ -108,7 +150,7 @@ prod,line=A,station=weld1 temp=81.9,qty=36i 1704067200000000000 ##### Partitioning by distinct tag values -| Description | Tag part(s) | Time part | Resulting partition key | +| Description | Tag parts | Time part | Resulting partition key | | 
:---------------------- | :---------------- | :--------- | :----------------------- | | By day (default) | | `%Y-%m-%d` | 2024-01-01 | | By day (non-default) | | `%d %b %Y` | 01 Jan 2024 | diff --git a/content/influxdb/cloud-serverless/.vale.ini b/content/influxdb/cloud-serverless/.vale.ini index 191227f55..43840007e 100644 --- a/content/influxdb/cloud-serverless/.vale.ini +++ b/content/influxdb/cloud-serverless/.vale.ini @@ -9,6 +9,7 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Cloud-Serverless, Google, write-good +Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO diff --git a/content/influxdb/clustered/.vale.ini b/content/influxdb/clustered/.vale.ini index 2095bfd0c..193ae44b3 100644 --- a/content/influxdb/clustered/.vale.ini +++ b/content/influxdb/clustered/.vale.ini @@ -9,6 +9,7 @@ Packages = Google, write-good, Hugo [*.md] BasedOnStyles = Vale, InfluxDataDocs, Clustered, Google, write-good +Google.DateFormat = NO Google.Ellipses = NO Google.Headings = NO Google.WordList = NO diff --git a/content/influxdb/clustered/admin/custom-partitions/define-custom-partitions.md b/content/influxdb/clustered/admin/custom-partitions/define-custom-partitions.md index 9b7b7ecf1..ffd7d61c0 100644 --- a/content/influxdb/clustered/admin/custom-partitions/define-custom-partitions.md +++ b/content/influxdb/clustered/admin/custom-partitions/define-custom-partitions.md @@ -33,7 +33,7 @@ table. #### Partition templates can only be applied on create You can only apply a partition template when creating a database or table. -There is no way to update a partition template on an existing resource. +You can't update a partition template on an existing resource. 
{{% /note %}} Use the following command flags to identify @@ -71,6 +71,9 @@ The following example creates a new `example-db` database and applies a partitio template that partitions by distinct values of two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag, and by week using the time format `%Y wk:%W`: + + + ```sh influxctl database create \ --template-tag room \ @@ -82,21 +85,60 @@ influxctl database create \ ## Create a table with a custom partition template -The following example creates a new `example-table` table in the `example-db` +The following example creates a new `example-table` table in the specified database and applies a partition template that partitions by distinct values of two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag, and by month using the time format `%Y-%m`: + + + +{{% code-placeholders "DATABASE_NAME" %}} + ```sh influxctl table create \ --template-tag room \ --template-tag sensor-type \ --template-tag-bucket customerID,500 \ --template-timeformat '%Y-%m' \ - example-db \ + DATABASE_NAME \ example-table ``` +{{% /code-placeholders %}} + +Replace the following in your command: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/clustered/admin/databases/) + + + ## Example partition templates Given the following [line protocol](/influxdb/clustered/reference/syntax/line-protocol/) @@ -108,7 +150,7 @@ prod,line=A,station=weld1 temp=81.9,qty=36i 1704067200000000000 ##### Partitioning by distinct tag values -| Description | Tag part(s) | Time part | Resulting partition key | +| Description | Tag parts | Time part | Resulting partition key | | :---------------------- | :---------------- | :--------- | :----------------------- | | By day (default) | | `%Y-%m-%d` | 2024-01-01 | | By day (non-default) | | `%d %b %Y` | 01 Jan 2024 | diff --git a/test/src/prepare-content.sh b/test/src/prepare-content.sh index 
6490ae585..a41d4b6d4 100644 --- a/test/src/prepare-content.sh +++ b/test/src/prepare-content.sh @@ -56,7 +56,8 @@ function substitute_placeholders { /os.getenv("MANAGEMENT_TOKEN")/! s/MANAGEMENT_TOKEN/$MANAGEMENT_TOKEN/g; /os.getenv("ORG_ID")/! s/ORG_ID/$INFLUX_ORG/g; /os.getenv("RETENTION_POLICY")/! s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g; - s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;' \ + s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g; + s/TEST_RUN/TEST_RUN_$(date +%s)/g' \ $file # v2-specific replacements. From cf9756c0f1f1931ae33b79699743377234273433 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 1 Jul 2024 16:39:39 -0500 Subject: [PATCH 05/96] chore(Dedicated): Update management tokena and Management API info. --- .../cloud-dedicated/get-started/_index.md | 22 +++- .../cloud-dedicated/get-started/setup.md | 24 ++-- .../reference/internals/security.md | 123 ++++++++++++------ 3 files changed, 116 insertions(+), 53 deletions(-) diff --git a/content/influxdb/cloud-dedicated/get-started/_index.md b/content/influxdb/cloud-dedicated/get-started/_index.md index 621a0952b..768841557 100644 --- a/content/influxdb/cloud-dedicated/get-started/_index.md +++ b/content/influxdb/cloud-dedicated/get-started/_index.md @@ -178,15 +178,29 @@ one of the following [token](/influxdb/cloud-dedicated/admin/tokens/) types: - **Database token**: A token that grants read and write access to InfluxDB databases. -- **Management token**: A short-lived (1 hour) [Auth0 token](#) used to - administer your InfluxDB cluster. These are generated by the `influxctl` CLI - and do not require any direct management. Management tokens authorize a user - to perform tasks related to: +- **Management token**: + [Auth0 authentication token](/influxdb/cloud-dedicated/reference/internals/security/#access-authentication-and-authorization) generated by the `influxctl` CLI and used to administer your InfluxDB cluster. 
+Management tokens authorize a user to perform tasks related to: - Account management - Database management - Database token management - Pricing + +By default, management tokens are + + - short-lived + - issued for a specific user + - issued by an OAuth2 identity provider + - managed by `influxctl` and don't require management by users + +However, for automation purposes, an `influxctl` user can +[manually create a long-lived +management token](/influxdb/cloud-dedicated/admin/tokens/management/#create-a-management-token) +for use with the +[Management API for Cloud Dedicated](/influxdb/cloud-dedicated/api/management). +Manually-created management tokens authenticate directly with your InfluxDB +cluster and don't require human interaction with your identity provider. {{< page-nav next="/influxdb/clustered/get-started/setup/" >}} diff --git a/content/influxdb/cloud-dedicated/get-started/setup.md b/content/influxdb/cloud-dedicated/get-started/setup.md index 7d2ab2256..d91c700ee 100644 --- a/content/influxdb/cloud-dedicated/get-started/setup.md +++ b/content/influxdb/cloud-dedicated/get-started/setup.md @@ -45,13 +45,13 @@ following information: ## Download, install, and configure the influxctl CLI The [`influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/) -provides a simple way to manage your InfluxDB Cloud Dedicated cluster from a -command line. It lets you perform administrative tasks such as managing +lets you manage your {{< product-name omit="Clustered" >}} cluster from a +command line and perform administrative tasks such as managing databases and tokens. 1. [Download and install the `influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). -2. **Create a connection profile and provide your InfluxDB Cloud Dedicated connection credentials**. +2. **Create a connection profile and provide your {{< product-name omit="Clustered" >}} connection credentials**. 
The `influxctl` CLI uses [connection profiles](/influxdb/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) to connect to and authenticate with your InfluxDB Cloud Dedicated cluster. @@ -72,7 +72,7 @@ If stored at a non-default location, include the `--config` flag with each {{% /note %}} - **Copy and paste the sample configuration profile code** into your `config.toml`: +3. **Copy and paste the sample configuration profile code** into your `config.toml`: {{% code-placeholders "ACCOUNT_ID|CLUSTER_ID" %}} @@ -97,10 +97,11 @@ _For detailed information about `influxctl` profiles, see ## Create a database -Use the [`influxctl database create` command](/influxdb/cloud-dedicated/reference/cli/influxctl/database/create/) +Use the +[`influxctl database create` command](/influxdb/cloud-dedicated/reference/cli/influxctl/database/create/) to create a database. You can use an existing database or create a new one specifically for this getting started tutorial. -_Examples in this getting started tutorial assume a database named **"get-started"**._ +_Examples in this getting started tutorial assume a database named `get-started`._ {{% note %}} @@ -109,15 +110,19 @@ _Examples in this getting started tutorial assume a database named **"get-starte The first time you run an `influxctl` CLI command, you are directed to login to **Auth0**. Once logged in, Auth0 issues a short-lived (1 hour) management token for the `influxctl` CLI that grants administrative access -to your InfluxDB Cloud Dedicated cluster. +to your {{< product-name omit="Clustered" >}} cluster. {{% /note %}} Provide the following: - Database name. -- _Optional:_ Database [retention period](/influxdb/cloud-dedicated/admin/databases/#retention-periods) +- _Optional:_ Database + [retention period](/influxdb/cloud-dedicated/admin/databases/#retention-periods) as a duration value. If no retention period is specified, the default is infinite. 
+ + + {{% code-placeholders "get-started|1y" %}} ```sh @@ -128,7 +133,8 @@ influxctl database create --retention-period 1y get-started ## Create a database token -Use the [`influxctl token create` command](/influxdb/cloud-dedicated/reference/cli/influxctl/token/create/) +Use the +[`influxctl token create` command](/influxdb/cloud-dedicated/reference/cli/influxctl/token/create/) to create a database token with read and write permissions for your database. Provide the following: diff --git a/content/influxdb/cloud-dedicated/reference/internals/security.md b/content/influxdb/cloud-dedicated/reference/internals/security.md index 857ffee80..915c486c4 100644 --- a/content/influxdb/cloud-dedicated/reference/internals/security.md +++ b/content/influxdb/cloud-dedicated/reference/internals/security.md @@ -14,7 +14,7 @@ InfluxData's information security program is based on industry-recognized standa including but not limited to ISO 27001, NIST 800-53, CIS20, and SOC2 Type II. The security policy describes the secure development, deployment, and operation of InfluxDB Cloud. -To protect data, InfluxDB Cloud Dedicated includes the following: +To protect data, {{% product-name %}} includes the following: - Guaranteed [tenant isolation](#tenant-isolation) and [data integrity](#data-integrity). - Trusted cloud infrastructure @@ -48,30 +48,30 @@ To protect data, InfluxDB Cloud Dedicated includes the following: ## Tenant isolation -In the InfluxDB Cloud Dedicated platform, access controls ensure that only valid +In the {{% product-name %}} platform, access controls ensure that only valid authenticated and authorized requests access your account data. Access control includes: -- A unique cluster ID assigned to each InfluxDB Cloud Dedicated cluster. +- A unique cluster ID assigned to each {{% product-name %}} cluster. All internal Cloud services require this cluster ID to authenticate entities before accessing or operating on data. 
- All external requests must be authorized with a valid token or session. - Every InfluxDB Cloud Dedicated service enforces this policy. + Every {{% product-name %}} service enforces this policy. ## Data integrity A dedicated internal service ensures data integrity by periodically creating, recording, and writing test data into test buckets. The service periodically executes queries to ensure the data hasn't been lost or corrupted. -A separate instance of this service lives within each InfluxDB Cloud Dedicated cluster. +A separate instance of this service lives within each {{% product-name %}} cluster. Additionally, the service creates out-of-band backups in [line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/), and ensures the backup data matches the data on disk. ## Cloud infrastructure -![InfluxDB Cloud Dedicated cluster architecture](https://docs.influxdata.com/img/influxdb/cloud-internals-cluster.png) +![{{% product-name %}} cluster architecture](https://docs.influxdata.com/img/influxdb/cloud-internals-cluster.png) -InfluxDB Cloud Dedicated is available on the following cloud providers: +{{% product-name %}} is available on the following cloud providers: - [Amazon Web Services (AWS)](https://aws.amazon.com/) - [Microsoft Azure](https://azure.microsoft.com/en-us/) _(Coming)_ @@ -80,17 +80,17 @@ InfluxDB Cloud Dedicated is available on the following cloud providers: To ensure data security, availability, and durability, each instance is isolated and protected in its own virtual private cloud (VPC). -Users interact with InfluxDB Cloud Dedicated only through Cloud Dedicated established APIs. +Users interact with {{% product-name %}} only through Cloud Dedicated established APIs. For cluster management activities, authorized users interact with the Granite service. For workload clusters, authorized users interact with APIs for InfluxDB v3 Ingesters (writes) and Queriers (reads). 
These services don't expose AWS S3 or other cloud provider or internal services. -InfluxDB Cloud Dedicated uses separate S3 buckets for each customer's cluster to persist writes. +{{% product-name %}} uses separate S3 buckets for each customer's cluster to persist writes. The S3 buckets are only accessible by the customer's cluster services. Separate configuration ensures one customer's S3 buckets cannot be accessed by another customer (for example, in the event of a service defect). ### Amazon Web Services (AWS) -An instance of InfluxDB Cloud Dedicated consists of microservices in Kubernetes. +An instance of {{% product-name %}} consists of microservices in Kubernetes. Each VPC within AWS is segmented into public and private subnets: - The public subnet contains resources exposed to the public internet, including @@ -103,7 +103,7 @@ For detail about AWS's physical security and data center protocols, see [AWS's C ### Google Cloud Platform (GCP) -In Google Cloud Platform (GCP), InfluxDB Cloud Dedicated uses the Google Kubernetes Engine (GKE) +In Google Cloud Platform (GCP), {{% product-name %}} uses the Google Kubernetes Engine (GKE) and Google Compute Engine to deploy individual cluster components. Clusters are isolated at the project level to enhance access controls and data governance, and support auditing. @@ -113,7 +113,7 @@ For detail about physical security in GCP data centers, see [Google's Compliance ### Microsoft Azure -In Microsoft Azure, InfluxDB Cloud Dedicated uses Azure Kubernetes Service (AKS) +In Microsoft Azure, {{% product-name %}} uses Azure Kubernetes Service (AKS) and Azure Virtual Machines to deploy individual cluster components. To support auditing and authorization control within Azure, clusters are deployed into dedicated VNets within each region. 
@@ -123,9 +123,9 @@ For detail about physical security within Microsoft Azure data centers, see [Mic ### Data encryption -InfluxDB Cloud Dedicated enforces TLS encryption for data in transit from all +{{% product-name %}} enforces TLS encryption for data in transit from all clients, including Telegraf agents, browsers, and custom applications. -TLS 1.2 is the minimum TLS version allowed by InfluxDB Cloud Dedicated, including Granite server and management cluster TLS termination. +TLS 1.2 is the minimum TLS version allowed by {{% product-name %}}, including Granite server and management cluster TLS termination. Requests using TLS 1.1 or earlier are rejected. By default, data at rest is encrypted using strong encryption methods (AES-256) @@ -145,11 +145,11 @@ InfluxData maintains the following application and service security controls: - Multi-factor authentication (MFA) is required for all infrastructure (AWS, GCP, and Azure) and for other production systems with access to user information (see [InfluxData Subprocessors](https://www.influxdata.com/legal/influxdata-subprocessors/)). -- InfluxDB Cloud Dedicated access is logged and audited regularly. +- {{% product-name %}} access is logged and audited regularly. ### Configuration management -InfluxDB Cloud Dedicated is programmatically managed and deployed using +{{% product-name %}} is programmatically managed and deployed using “infrastructure as code” which undergoes version control and testing as part of the automated deployment process. Permission to push code is tightly controlled, @@ -191,7 +191,7 @@ Dedicated environments. for event analysis, capacity planning, alerting, and instrumentation. Access to these logs and operator interfaces is controlled by group access permissions, and provided only to teams that require access to deliver - InfluxDB Cloud Dedicated services. + {{% product-name %}} services. 
### Security assessments @@ -212,7 +212,7 @@ The Business Continuity Plan and Disaster Recovery Plan are updated annually. ### Data durability -Data is replicated within multiple storage engines of InfluxDB Cloud Dedicated. +Data is replicated within multiple storage engines of {{% product-name %}}. The replication mechanism executes a serializable upsert and delete stream against all replicas and runs background entropy detection processes to identify diverged replicas. @@ -233,7 +233,9 @@ Users can configure the following security controls: ### Access, authentication, and authorization -InfluxDB Cloud Dedicated uses [Auth0](https://auth0.com/) for authentication and separates workload cluster management authorizations (using _management tokens_) from database read and write authorizations (using _database tokens_). +{{< product-name >}} uses [Auth0](https://auth0.com/) for authentication and +separates workload cluster management authorizations (using _management tokens_) +from database read and write authorizations (using _database tokens_). - [User provisioning](#user-provisioning) - [Management tokens](#management-tokens) @@ -241,23 +243,32 @@ InfluxDB Cloud Dedicated uses [Auth0](https://auth0.com/) for authentication and #### User provisioning -InfluxData uses Auth0 to create user accounts and assign permission sets to user accounts on the InfluxDB Cloud Dedicated system. +InfluxData uses [Auth0](https://auth0.com/) to create user accounts and assign +permission sets to user accounts on {{% product-name %}}. 
After a user account is created, InfluxData provides the user with the following: - An **Auth0 login** to authenticate access to the cluster -- The InfluxDB Cloud Dedicated **account ID** -- The InfluxDB Cloud Dedicated **cluster ID** -- The InfluxDB Cloud Dedicated **cluster URL** +- The {{% product-name %}} **account ID** +- The {{% product-name %}} **cluster ID** +- The {{% product-name %}} **cluster URL** - A password reset email for setting the login password -With a valid password, the user can login via InfluxData's `influxctl` command line tool. -The login command initiates an Auth0 browser login so that the password is never exchanged with `influxctl`. -With a successful authentication to Auth0, InfluxDB Cloud Dedicated provides the user's `influxctl` session with a short-lived [management token](#management-tokens) for access to the Granite service. -The user interacts with the `influxctl` command line tool to manage the workload cluster, including creating [database tokens](#database-tokens) for database read and write access. +With a valid password, the user can login by invoking one of the +[`influxctl` commands](/influxdb/cloud-dedicated/reference/influxctl/). +The command initiates an Auth0 browser login so that the password is never +exchanged with `influxctl`. +After a successful Auth0 authentication, {{% product-name %}} provides the +user's `influxctl` session with a short-lived +[management token](#management-tokens) for access to the Granite service. +The user interacts with the `influxctl` command line tool to manage the workload +cluster, including creating [database tokens](#database-tokens) for database +read and write access and [creating long-lived management tokens](/influxdb/cloud-dedicated/admin/management-tokens/) +for use with the [Management API](/influxdb/cloud-dedicated/api/management/). 
#### Management tokens -Management tokens authenticate user accounts to the Granite service and provide authorizations for workload cluster management activities, including: +Management tokens authenticate user accounts to the Granite service and provide +authorizations for workload cluster management activities, including: - account, pricing, and infrastructure management - inviting, listing, and deleting users @@ -268,19 +279,51 @@ Management tokens consist of the following: - An access token string (sensitive) - A permission set for management activities (configured during user provisioning) -- A mandatory 1 hour expiration +- A mandatory 1 hour expiration for tokens generated by logging in to `influxctl` -When a user issues a command using the `influxctl` command-line tool, `influxctl` sends the management token string with the request to the server, where Granite validates the token (for example, using Auth0). -If the management token is valid and not expired, the service then compares the token's permissions against the permissions needed to complete the user's request. +When a user issues a command using the `influxctl` command-line tool, +`influxctl` sends the management token string with the request to the server, +where Granite validates the token (for example, using Auth0). +If the management token is valid and not expired, the service then compares the +token's permissions against the permissions needed to complete the user's request. -Only valid unexpired tokens that have the necessary permission sets are authorized to perform management functions with InfluxDB Cloud Dedicated. -Following security best practice, management tokens are never stored on InfluxDB Cloud Dedicated (Granite or workload cluster) servers, which prevents token theft from the server. -On the client (the user's system), the management token is stored on disk with restricted permissions for `influxctl` to use on subsequent runs. 
-For example, a user's Linux system would store the management token at `~/.cache/influxctl/*.json` with `0600` permissions (that is, owner read and write, and no access for _group_ or _other_). +Only valid unexpired tokens that have the necessary permission sets are +authorized to perform management functions with {{% product-name %}}. +Following security best practice, management tokens are never stored on +{{% product-name %}} (Granite or workload cluster) servers, which prevents token +theft from the server. +On the client (the user's system), the management token is stored on disk with +restricted permissions for `influxctl` to use on subsequent runs. +For example, a user's Linux system would store the management token at +`~/.cache/influxctl/*.json` with `0600` permissions +(that is, owner read and write, and no access for _group_ or _other_). + +##### Management tokens and the Management API + +A user associated with the cluster and authorized through OAuth may use +`influxctl` to +[manually create and revoke management tokens](/influxdb/cloud-dedicated/admin/tokens/management/) +for automation use +cases--for example, using the [Management API for +{{% product-name %}}](/influxdb/cloud-dedicated/api/management/) to rotate +database tokens or create tables. + +To authenticate a Management API request, the user passes the manually created +token in the HTTP `Authorization` header: + +```HTTP +Authorization MANAGEMENT_TOKEN +``` + +A manually created management token has an optional expiration and +doesn't require human interaction with the OAuth provider. + +Manually created management tokens are meant for automation use cases +and shouldn't be used to circumvent the OAuth provider. #### Database tokens -Database tokens provide authorization for users and client applications to read and write data and metadata in an InfluxDB Cloud Dedicated database. 
+Database tokens provide authorization for users and client applications to read and write data and metadata in an {{% product-name %}} database. All data write and query API requests require a valid database token with sufficient permissions. _**Note:** an all-access management token can't read or write to a database because it's not a database token._ @@ -291,13 +334,13 @@ Database tokens consist of the following: - A permission set for reading from a database, writing to a database, or both - An API key string (sensitive, with the format apiv_) -When a user successfully creates a database token, the InfluxDB Cloud Dedicated Granite server reveals the new database token to the user as an API key string--the key string is only visible when it's created. +When a user successfully creates a database token, the {{% product-name %}} Granite server reveals the new database token to the user as an API key string--the key string is only visible when it's created. The user is responsible for securely storing and managing the API key string. -Following security best practice, a database token's raw API key string is never stored on InfluxDB Cloud Dedicated (Granite or workload cluster) servers, which prevents token theft from the server. +Following security best practice, a database token's raw API key string is never stored on {{% product-name %}} (Granite or workload cluster) servers, which prevents token theft from the server. The servers store non-sensitive database token attributes (identifier, description, and permission set) and the SHA-512 of the token API key string. When a user provides the API key as part of a request to the workload cluster, the cluster validates the token's SHA-512 against the stored SHA-512. -If the database token is valid, InfluxDB Cloud Dedicated compares the token's permissions against the permissions needed to complete the user's request. 
+If the database token is valid, {{% product-name %}} compares the token's permissions against the permissions needed to complete the user's request. The request is only authorized if it contains a valid token with the necessary permission set. ##### Token rotation @@ -310,7 +353,7 @@ To rotate a token, a user deletes the database token and issues a new one. InfluxDB Cloud accounts support multiple users in an organization. By default, each user with the *Owner* role has full permissions on resources -in your InfluxDB Cloud Dedicated cluster. +in your {{% product-name %}} cluster. ### Advanced controls From f6a9e72c5087df2495cc4c7d822daf172a91235a Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 1 Jul 2024 19:49:37 -0500 Subject: [PATCH 06/96] chore(v3): Test influxctl token create --- .../cloud-dedicated/get-started/setup.md | 28 +++++- .../influxdb/clustered/get-started/setup.md | 86 +++++++++++++++---- 2 files changed, 94 insertions(+), 20 deletions(-) diff --git a/content/influxdb/cloud-dedicated/get-started/setup.md b/content/influxdb/cloud-dedicated/get-started/setup.md index d91c700ee..89d4fbeb2 100644 --- a/content/influxdb/cloud-dedicated/get-started/setup.md +++ b/content/influxdb/cloud-dedicated/get-started/setup.md @@ -51,10 +51,10 @@ databases and tokens. 1. [Download and install the `influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). -2. **Create a connection profile and provide your {{< product-name omit="Clustered" >}} connection credentials**. +2. **Create a connection profile and provide your {{< product-name >}} connection credentials**. The `influxctl` CLI uses [connection profiles](/influxdb/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) - to connect to and authenticate with your InfluxDB Cloud Dedicated cluster. + to connect to and authenticate with your {{< product-name omit="Clustered" >}} cluster. 
Create a file named `config.toml` at the following location depending on your operating system. @@ -144,6 +144,10 @@ Provide the following: - `--write-database` Grants write access to a database - Token description + + + + {{% code-placeholders "get-started" %}} ```sh @@ -155,6 +159,26 @@ influxctl token create \ {{% /code-placeholders %}} + + The command returns the token ID and the token string. Store the token string in a safe place. You'll need it later. diff --git a/content/influxdb/clustered/get-started/setup.md b/content/influxdb/clustered/get-started/setup.md index c966ec295..39a4c9b9c 100644 --- a/content/influxdb/clustered/get-started/setup.md +++ b/content/influxdb/clustered/get-started/setup.md @@ -29,15 +29,14 @@ you need is in place. ## Install and configure your InfluxDB cluster -{{% note %}} -_InfluxDB Clustered installation instructions are coming soon._ -{{% /note %}} +Follow the [Install InfluxDB Clustered](/influxdb/clustered/install/) guide to +install prerequisites and set up your cluster. ## Download, install, and configure the influxctl CLI - + The [`influxctl` CLI](/influxdb/clustered/reference/cli/influxctl/) -provides a simple way to manage your {{< product-name omit="Clustered" >}} cluster from a -command line. It lets you perform administrative tasks such as managing +lets you manage your {{< product-name omit="Clustered" >}} cluster from a +command line and perform administrative tasks such as managing databases and tokens. 1. [Download and install the `influxctl` CLI](/influxdb/clustered/reference/cli/influxctl/#download-and-install-influxctl). @@ -57,13 +56,16 @@ databases and tokens. | Windows | `%APPDATA%\influxctl\config.toml` | {{% note %}} + If stored at a non-default location, include the `--config` flag with each `influxctl` command and provide the path to your profile configuration file. + {{% /note %}} - **Copy and paste the sample configuration profile code** into your `config.toml`: +1. 
**Copy and paste the sample configuration profile code** into your `config.toml`: {{% code-placeholders "PORT|OAUTH_TOKEN_URL|OAUTH_DEVICE_URL|OAUTH_CLIENT_ID" %}} + ```toml [[profile]] name = "default" @@ -77,28 +79,32 @@ If stored at a non-default location, include the `--config` flag with each token_url = "OAUTH_TOKEN_URL" device_url = "OAUTH_DEVICE_URL" ``` + {{% /code-placeholders %}} Replace the following with your {{< product-name >}} credentials: - - {{% code-placeholder-key %}}`PORT`{{% /code-placeholder-key %}}: the port to use to access your InfluxDB cluster - - {{% code-placeholder-key %}}`OAUTH_CLIENT_ID`{{% /code-placeholder-key %}}: the client URL of your OAuth2 provider - (for example: `https://identityprovider/oauth2/v2/token`) - - {{% code-placeholder-key %}}`OAUTH_DEVICE_ID`{{% /code-placeholder-key %}}: the device URL of your OAuth2 provider - (for example: `https://identityprovider/oauth2/v2/auth/device`) + +- {{% code-placeholder-key %}}`PORT`{{% /code-placeholder-key %}}: the port to use to access your InfluxDB cluster +- {{% code-placeholder-key %}}`OAUTH_CLIENT_ID`{{% /code-placeholder-key %}}: the client URL of your OAuth2 provider +(for example: `https://identityprovider/oauth2/v2/token`) +- {{% code-placeholder-key %}}`OAUTH_DEVICE_ID`{{% /code-placeholder-key %}}: the device URL of your OAuth2 provider +(for example: `https://identityprovider/oauth2/v2/auth/device`) _For detailed information about `influxctl` profiles, see [Configure connection profiles](/influxdb/clustered/reference/cli/influxctl/#configure-connection-profiles)_. ## Create a database -Use the [`influxctl database create` command](/influxdb/clustered/reference/cli/influxctl/database/create/) +Use the +[`influxctl database create` command](/influxdb/clustered/reference/cli/influxctl/database/create/) to create a database. You can use an existing database or create a new one specifically for this getting started tutorial. 
-_Examples in this getting started tutorial assume a database named **"get-started"**._ +_Examples in this getting started tutorial assume a database named `get-started`._ {{% note %}} + #### Authenticate with your cluster - + The first time you run an `influxctl` CLI command, you are directed to login to your **OAuth provider**. Once logged in, your OAuth provider issues a short-lived (1 hour) management token for the `influxctl` CLI that grants @@ -108,18 +114,25 @@ administrative access to your {{< product-name omit="Clustered" >}} cluster. Provide the following: - Database name. -- _Optional:_ Database [retention period](/influxdb/clustered/admin/databases/#retention-periods) +- _Optional:_ Database + [retention period](/influxdb/clustered/admin/databases/#retention-periods) as a duration value. If no retention period is specified, the default is infinite. + + + {{% code-placeholders "get-started|1y" %}} + ```sh influxctl database create --retention-period 1y get-started ``` + {{% /code-placeholders %}} ## Create a database token -Use the [`influxctl token create` command](/influxdb/clustered/reference/cli/influxctl/token/create/) +Use the +[`influxctl token create` command](/influxdb/clustered/reference/cli/influxctl/token/create/) to create a database token with read and write permissions for your database. Provide the following: @@ -129,15 +142,39 @@ Provide the following: - `--write-database` Grants write access to a database - Token description + + + {{% code-placeholders "get-started" %}} + ```sh influxctl token create \ --read-database get-started \ --write-database get-started \ "Read/write token for get-started database" ``` + {{% /code-placeholders %}} + The command returns the token ID and the token string. Store the token string in a safe place. @@ -145,6 +182,7 @@ You'll need it later. 
**This is the only time the token string is available in plain text.** {{% note %}} + #### Store secure tokens in a secret store Token strings are returned _only_ on token creation. @@ -159,44 +197,56 @@ Code samples in later sections assume you assigned the token string to an {{< tabs-wrapper >}} {{% tabs %}} -[macOS and Linux](#) +[MacOS and Linux](#) [PowerShell](#) [CMD](#) {{% /tabs %}} {{% tab-content %}} + {{% code-placeholders "DATABASE_TOKEN" %}} + ```sh export INFLUX_TOKEN=DATABASE_TOKEN ``` + {{% /code-placeholders %}} + {{% /tab-content %}} {{% tab-content %}} + {{% code-placeholders "DATABASE_TOKEN" %}} + ```powershell $env:INFLUX_TOKEN = "DATABASE_TOKEN" ``` + {{% /code-placeholders %}} + {{% /tab-content %}} {{% tab-content %}} + {{% code-placeholders "DATABASE_TOKEN" %}} + ```sh set INFLUX_TOKEN=DATABASE_TOKEN # Make sure to include a space character at the end of this command. ``` + {{% /code-placeholders %}} + {{% /tab-content %}} {{< /tabs-wrapper >}} From 478cb393325fedbab439dc7c4ea4ad856df3f355 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 1 Jul 2024 21:30:12 -0500 Subject: [PATCH 07/96] chore(v3): Skip tests for influxctl database create. Fix influxctl reference. 
--- .../cloud-dedicated/get-started/setup.md | 1 - .../cli/influxctl/database/create.md | 41 ++++++++++++++++--- .../cli/influxctl/database/create.md | 37 +++++++++++++++-- 3 files changed, 68 insertions(+), 11 deletions(-) diff --git a/content/influxdb/cloud-dedicated/get-started/setup.md b/content/influxdb/cloud-dedicated/get-started/setup.md index 89d4fbeb2..3b4f137b2 100644 --- a/content/influxdb/cloud-dedicated/get-started/setup.md +++ b/content/influxdb/cloud-dedicated/get-started/setup.md @@ -147,7 +147,6 @@ Provide the following: - {{% code-placeholders "get-started" %}} ```sh diff --git a/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/create.md b/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/create.md index 989dbe89c..bb15512a4 100644 --- a/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/create.md +++ b/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/create.md @@ -65,12 +65,29 @@ Be sure to follow [partitioning best practices](/influxdb/cloud-dedicated/admin/ If defining a custom partition template for your database with any of the `--template-*` flags, always include the `--template-timeformat` flag with a -time format to use in your partition template. Otherwise time will be omitted -from the partition template and partitions won't be able to be compacted. +time format to use in your partition template. +Otherwise, InfluxDB omits time from the partition template and won't compact partitions. {{% /note %}} +{{% warn %}} +#### Cannot reuse deleted database names + +You cannot reuse the name of a deleted database when creating a new database. 
+If you try to reuse the name, the API response status code +is `400` and the `message` field contains the following: + +```text +'iox_proxy.app.CreateDatabase failed to create database: \ +rpc error: code = AlreadyExists desc = A namespace with the +name `` already exists' +``` +{{% /warn %}} + ## Usage + + + ```sh influxctl database create [flags] ``` @@ -85,12 +102,12 @@ influxctl database create [flags] | Flag | | Description | | :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| | `--retention-period` | Database retention period (default is 0s or infinite) | -| | `--max-tables` | Maximum tables per database (default is 500, 0 uses default) | -| | `--max-columns` | Maximum columns per table (default is 250, 0 uses default) | +| | `--retention-period` | Database retention period (default is `0s`, infinite) | +| | `--max-tables` | Maximum tables per database (default is 500, `0` uses default) | +| | `--max-columns` | Maximum columns per table (default is 250, `0` uses default) | | | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | | | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | -| | `--template-timeformat` | Timestamp format for partition template | +| | `--template-timeformat` | Timestamp format for partition template (default is `%Y-%m-%d`) | | `-h` | `--help` | Output command help | {{% caption %}} @@ -106,12 +123,18 @@ _Also see [`influxctl` global flags](/influxdb/cloud-dedicated/reference/cli/inf ### Create a database with an infinite retention period + + + ```sh influxctl database create mydb ``` ### Create a database with a 30-day retention period + + + ```sh influxctl database create \ --retention-period 30d \ @@ -120,6 +143,9 @@ influxctl database create \ ### Create a 
database with non-default table and column limits + + + ```sh influxctl database create \ --max-tables 200 \ @@ -133,6 +159,9 @@ The following example creates a new `mydb` database and applies a partition template that partitions by two tags (`room` and `sensor-type`) and by week using the time format `%Y wk:%W`: + + + ```sh influxctl database create \ --template-tag room \ diff --git a/content/influxdb/clustered/reference/cli/influxctl/database/create.md b/content/influxdb/clustered/reference/cli/influxctl/database/create.md index 27cb11745..1767fbbf2 100644 --- a/content/influxdb/clustered/reference/cli/influxctl/database/create.md +++ b/content/influxdb/clustered/reference/cli/influxctl/database/create.md @@ -68,8 +68,25 @@ time format to use in your partition template. Otherwise, InfluxDB omits time from the partition template and won't compact partitions. {{% /note %}} +{{% warn %}} +#### Cannot reuse deleted database names + +You cannot reuse the name of a deleted database when creating a new database. 
+If you try to reuse the name, the API response status code +is `400` and the `message` field contains the following: + +```text +'iox_proxy.app.CreateDatabase failed to create database: \ +rpc error: code = AlreadyExists desc = A namespace with the +name `` already exists' +``` +{{% /warn %}} + ## Usage + + + ```sh influxctl database create [flags] ``` @@ -84,12 +101,12 @@ influxctl database create [flags] | Flag | | Description | | :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| | `--retention-period` | Database retention period (default is 0s or infinite) | -| | `--max-tables` | Maximum tables per database (default is 500, 0 uses default) | -| | `--max-columns` | Maximum columns per table (default is 250, 0 uses default) | +| | `--retention-period` | Database retention period (default is `0s`, infinite) | +| | `--max-tables` | Maximum tables per database (default is 500, `0` uses default) | +| | `--max-columns` | Maximum columns per table (default is 250, `0` uses default) | | | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | | | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | -| | `--template-timeformat` | Timestamp format for partition template | +| | `--template-timeformat` | Timestamp format for partition template (default is `%Y-%m-%d`) | | `-h` | `--help` | Output command help | {{% caption %}} @@ -105,12 +122,18 @@ _Also see [`influxctl` global flags](/influxdb/clustered/reference/cli/influxctl ### Create a database with an infinite retention period + + + ```sh influxctl database create mydb ``` ### Create a database with a 30-day retention period + + + ```sh influxctl database create \ --retention-period 30d \ @@ -119,6 +142,9 @@ influxctl database create \ ### Create a 
database with non-default table and column limits + + + ```sh influxctl database create \ --max-tables 200 \ @@ -132,6 +158,9 @@ The following example creates a new `mydb` database and applies a partition template that partitions by two tags (`room` and `sensor-type`) and by week using the time format `%Y wk:%W`: + + + ```sh influxctl database create \ --template-tag room \ From 23816d9a7e2198b1d7ab9fbebde142b3b962aa0b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 09:29:16 -0500 Subject: [PATCH 08/96] fix(v3): Skip tests for creating databases; namespaces aren't reusable. --- .../vocabularies/InfluxDataDocs/accept.txt | 2 +- .../cloud-dedicated/admin/databases/create.md | 48 ++++++++++++------- .../clustered/admin/databases/create.md | 21 +++++--- 3 files changed, 48 insertions(+), 23 deletions(-) diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt index 2d7ef17b4..5eb80d548 100644 --- a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -7,7 +7,7 @@ Anaconda Apache Superset Arrow AuthToken -CLI +CLI|\/cli\/ CSV Data Explorer Dedup diff --git a/content/influxdb/cloud-dedicated/admin/databases/create.md b/content/influxdb/cloud-dedicated/admin/databases/create.md index 54ff45460..a40c1bc15 100644 --- a/content/influxdb/cloud-dedicated/admin/databases/create.md +++ b/content/influxdb/cloud-dedicated/admin/databases/create.md @@ -10,20 +10,22 @@ menu: parent: Manage databases weight: 201 list_code_example: | + ##### CLI ```sh influxctl database create \ --retention-period 30d \ --max-tables 500 \ --max-columns 250 \ - + DATABASE_NAME ``` + ##### API ```sh curl \ --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ - --request POST + --request POST \ --header "Accept: application/json" \ --header 'Content-Type: application/json' \ 
--header "Authorization: Bearer MANAGEMENT_TOKEN" \ @@ -103,6 +105,9 @@ to create a database in your {{< product-name omit=" Clustered" >}} cluster. _{{< product-name >}} supports up to 7 total tags or tag buckets in the partition template._ {{% /note %}} + + + {{% code-placeholders "DATABASE_NAME|30d|500|100|300|(TAG_KEY(_\d)?)" %}} ```sh @@ -127,11 +132,15 @@ Replace the following in your command: ## Database attributes -- [Retention period syntax (influxctl CLI)](#retention-period-syntax-influxctl-cli) -- [Custom partitioning (influxctl CLI)](#custom-partitioning-influxctl-cli) -- [Database naming restrictions](#database-naming-restrictions) -- [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) -- [Table and column limits](#table-and-column-limits) +- [Database attributes](#database-attributes) + - [Retention period syntax (influxctl CLI)](#retention-period-syntax-influxctl-cli) + - [Custom partitioning (influxctl CLI)](#custom-partitioning-influxctl-cli) +- [Database attributes](#database-attributes-1) + - [Retention period syntax (Management API)](#retention-period-syntax-management-api) + - [Custom partitioning (Management API)](#custom-partitioning-management-api) + - [Database naming restrictions](#database-naming-restrictions) + - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) + - [Table and column limits](#table-and-column-limits) ### Retention period syntax (influxctl CLI) @@ -190,7 +199,7 @@ For more information, see [Manage data partitioning](/influxdb/cloud-dedicated/a #### Partition templates can only be applied on create You can only apply a partition template when creating a database. -There is no way to update a partition template on an existing database. +You can't update a partition template on an existing database. 
{{% /note %}} @@ -237,12 +246,15 @@ _{{< product-name >}} supports up to 7 total tags or tag buckets in the partitio The following example shows how to use the Management API to create a database with custom partitioning: + + + {{% code-placeholders "DATABASE_NAME|2592000000000|500|100|300|250|ACCOUNT_ID|CLUSTER_ID|MANAGEMENT_TOKEN|(TAG_KEY(_\d)?)" %}} ```sh curl \ --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ - --request POST + --request POST \ --header "Accept: application/json" \ --header 'Content-Type: application/json' \ --header "Authorization: Bearer MANAGEMENT_TOKEN" \ @@ -294,11 +306,15 @@ Replace the following in your request: ## Database attributes -- [Retention period syntax (Management API)](#retention-period-syntax-management-api) -- [Custom partitioning (Management API)](#custom-partitioning-management-api) -- [Database naming restrictions](#database-naming-restrictions) -- [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) -- [Table and column limits](#table-and-column-limits) +- [Database attributes](#database-attributes) + - [Retention period syntax (influxctl CLI)](#retention-period-syntax-influxctl-cli) + - [Custom partitioning (influxctl CLI)](#custom-partitioning-influxctl-cli) +- [Database attributes](#database-attributes-1) + - [Retention period syntax (Management API)](#retention-period-syntax-management-api) + - [Custom partitioning (Management API)](#custom-partitioning-management-api) + - [Database naming restrictions](#database-naming-restrictions) + - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) + - [Table and column limits](#table-and-column-limits) ### Retention period syntax (Management API) @@ -334,7 +350,7 @@ For more information, see [Manage data partitioning](/influxdb/cloud-dedicated/a #### Partition templates can only be applied on create You can only apply a partition template when creating a database. 
-There is no way to update a partition template on an existing database. +You can't update a partition template on an existing database. {{% /note %}} @@ -364,7 +380,7 @@ database and retention policy (DBRP) to be queryable with InfluxQL. **When naming a database that you want to query with InfluxQL**, use the following naming convention to automatically map v1 DBRP combinations to an {{% product-name %}} database: -```sh +```text database_name/retention_policy_name ``` diff --git a/content/influxdb/clustered/admin/databases/create.md b/content/influxdb/clustered/admin/databases/create.md index 5ad5598ed..9b1d7a8b7 100644 --- a/content/influxdb/clustered/admin/databases/create.md +++ b/content/influxdb/clustered/admin/databases/create.md @@ -1,20 +1,22 @@ --- title: Create a database description: > - Use the [`influxctl database create` command](/influxdb/clustered/reference/cli/influxctl/database/create/) - to create a new InfluxDB database in your InfluxDB cluster. + Use the [`influxctl database create` command](/influxdb/clustered/reference/cli/influxctl/database/create/) to create a new InfluxDB database in your InfluxDB cluster. Provide a database name and an optional retention period. menu: influxdb_clustered: parent: Manage databases weight: 201 list_code_example: | + + ##### CLI + ```sh influxctl database create \ --retention-period 30d \ --max-tables 500 \ --max-columns 250 \ - + DATABASE_NAME ``` related: - /influxdb/clustered/reference/cli/influxctl/database/create/ @@ -44,7 +46,11 @@ to create a database in your {{< product-name omit=" Clustered" >}} cluster. 
_{{< product-name >}} supports up to 7 total tags or tag buckets in the partition template._ {{% /note %}} + + + {{% code-placeholders "DATABASE_NAME|30d|500|200" %}} + ```sh influxctl database create \ --retention-period 30d \ @@ -57,13 +63,16 @@ influxctl database create \ --template-timeformat '%Y-%m-%d' \ DATABASE_NAME ``` + {{% /code-placeholders %}} - [Retention period syntax](#retention-period-syntax) - [Database naming restrictions](#database-naming-restrictions) - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) - [Table and column limits](#table-and-column-limits) -- [Custom partitioning](#custom-partitioning) + - [Table limit](#table-limit) + - [Column limit](#column-limit) + - [Custom partitioning](#custom-partitioning) ## Retention period syntax @@ -127,7 +136,7 @@ database and retention policy (DBRP) to be queryable with InfluxQL. **When naming a database that you want to query with InfluxQL**, use the following naming convention to automatically map v1 DBRP combinations to an {{% product-name %}} database: -```sh +```text database_name/retention_policy_name ``` @@ -225,5 +234,5 @@ For more information, see [Manage data partitioning](/influxdb/clustered/admin/c #### Partition templates can only be applied on create You can only apply a partition template when creating a database. -There is no way to update a partition template on an existing database. +You can't update a partition template on an existing database. {{% /note %}} From 2591db73155367126bcd432211cdc4c1255b3da0 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 10:47:06 -0500 Subject: [PATCH 09/96] fix(v3): Don't use test monitoring script when running a container with -t (TTY). Fix update example. 
--- .husky/pre-commit | 7 +++++-- .../cloud-dedicated/admin/databases/update.md | 13 +++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/.husky/pre-commit b/.husky/pre-commit index 6e0acb87e..9b223fbff 100644 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -1,3 +1,6 @@ -./test/src/monitor-tests.sh start +# If you're running tests in a container that doesn't support TTY (docker run -t), you can use monitor-tests.sh to open URLs in the host's browser. +# Your test needs to redirect the URL to the test/urls.txt file--for example: +# influxctl database update /dev/null > test/urls.txt +# ./test/src/monitor-tests.sh start npx lint-staged --relative -./test/src/monitor-tests.sh kill +# ./test/src/monitor-tests.sh kill diff --git a/content/influxdb/cloud-dedicated/admin/databases/update.md b/content/influxdb/cloud-dedicated/admin/databases/update.md index a025f1540..a8a969659 100644 --- a/content/influxdb/cloud-dedicated/admin/databases/update.md +++ b/content/influxdb/cloud-dedicated/admin/databases/update.md @@ -15,8 +15,8 @@ list_code_example: | influxctl database update \ --retention-period 30d \ --max-tables 500 \ - --max-columns 250 - + --max-columns 250 \ + DATABASE_NAME ``` ##### API @@ -64,10 +64,11 @@ to update a database in your {{< product-name omit=" Clustered" >}} cluster. {{% code-placeholders "DATABASE_NAME|30d|500|200" %}} ```sh -influxctl database update DATABASE_NAME \ +influxctl database update \ --retention-period 30d \ --max-tables 500 \ - --max-columns 250 + --max-columns 250 \ + DATABASE_NAME ``` {{% /code-placeholders %}} @@ -217,7 +218,7 @@ database to apply updates to. The database name itself can't be updated. #### Partition templates can't be updated You can only apply a partition template when creating a database. -There is no way to update a partition template on an existing database. +You can't update a partition template on an existing database. 
{{% /warn %}} ### Database naming restrictions @@ -243,7 +244,7 @@ database and retention policy (DBRP) to be queryable with InfluxQL. **When naming a database that you want to query with InfluxQL**, use the following naming convention to automatically map v1 DBRP combinations to a database: -```sh +```text database_name/retention_policy_name ``` From e4b32fbec62ab0448aa7a67c1b0c8e9be2a1da40 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 11:21:39 -0500 Subject: [PATCH 10/96] fix(v3): database update formatting --- .../cloud-dedicated/admin/databases/update.md | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/content/influxdb/cloud-dedicated/admin/databases/update.md b/content/influxdb/cloud-dedicated/admin/databases/update.md index a8a969659..d7d762c6a 100644 --- a/content/influxdb/cloud-dedicated/admin/databases/update.md +++ b/content/influxdb/cloud-dedicated/admin/databases/update.md @@ -56,8 +56,8 @@ to update a database in your {{< product-name omit=" Clustered" >}} cluster. 2. In your terminal, run the `influxctl database update` command and provide the following: - Database name - - _Optional_: Database [retention period](/influxdb/cloud-dedicated/admin/databases/#retention-periods) - Default is `infinite` (`0`). + - _Optional_: Database [retention period](/influxdb/cloud-dedicated/admin/databases/#retention-periods). + Default is infinite (`0`). - _Optional_: Database table (measurement) limit. Default is `500`. - _Optional_: Database column limit. Default is `250`. @@ -77,6 +77,13 @@ Replace the following in your command: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/cloud-dedicated/admin/databases/) +{{% warn %}} +#### Database names can't be updated + +The `influxctl database update` command uses the database name to identify which +database to apply updates to. The database name itself can't be updated. 
+{{% /warn %}} + ## Database attributes - [Retention period syntax](#retention-period-syntax-influxctl-cli) @@ -92,7 +99,7 @@ for the database. The retention period value is a time duration value made up of a numeric value plus a duration unit. For example, `30d` means 30 days. -A zero duration (`0d`) retention period is infinite and data won't expire. +A zero duration (for example, `0s` or `0d`) retention period is infinite and data won't expire. The retention period value cannot be negative or contain whitespace. {{< flex >}} @@ -212,8 +219,10 @@ The retention period value cannot be negative or contain whitespace. #### Database names can't be updated -The `influxctl database update` command uses the database name to identify which -database to apply updates to. The database name itself can't be updated. +The Management API `PATCH /api/v0/database` endpoint and +the`influxctl database update` command use the database name to identify which +database to apply updates to. +The database name itself can't be updated. #### Partition templates can't be updated @@ -235,7 +244,7 @@ Database names must adhere to the following naming restrictions: In InfluxDB 1.x, data is stored in [databases](/influxdb/v1/concepts/glossary/#database) and [retention policies](/influxdb/v1/concepts/glossary/#retention-policy-rp). -In InfluxDB Cloud Dedicated, databases and retention policies have been merged into +In {{< product-name >}}, databases and retention policies have been merged into _databases_, where databases have a retention period, but retention policies are no longer part of the data model. 
Because InfluxQL uses the 1.x data model, a database must be mapped to a v1 From b5a8e2b7b3e5501feadad15433837c5bce4ad21c Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 11:23:08 -0500 Subject: [PATCH 11/96] fix(v3): Clustered update database --- .../clustered/admin/databases/update.md | 55 +++++++++++-------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/content/influxdb/clustered/admin/databases/update.md b/content/influxdb/clustered/admin/databases/update.md index 72a3008de..99534f0ad 100644 --- a/content/influxdb/clustered/admin/databases/update.md +++ b/content/influxdb/clustered/admin/databases/update.md @@ -9,10 +9,11 @@ menu: weight: 201 list_code_example: | ```sh - influxctl database update DATABASE_NAME \ + influxctl database update \ --retention-period 30d \ --max-tables 500 \ - --max-columns 250 + --max-columns 250 \ + DATABASE_NAME ``` related: - /influxdb/clustered/reference/cli/influxctl/database/update/ @@ -25,20 +26,27 @@ to update a database in your {{< product-name omit=" Clustered" >}} cluster. 2. Run the `influxctl database update` command and provide the following: - Database name - - _Optional_: Database [retention period](/influxdb/clustered/admin/databases/#retention-periods) - _(default is infinite)_ - - _Optional_: Database table (measurement) limit _(default is 500)_ - - _Optional_: Database column limit _(default is 250)_ + - _Optional_: Database [retention period](/influxdb/cloud-dedicated/admin/databases/#retention-periods). + Default is infinite (`0`). + - _Optional_: Database table (measurement) limit. Default is `500`. + - _Optional_: Database column limit. Default is `250`. 
{{% code-placeholders "DATABASE_NAME|30d|500|200" %}} + ```sh -influxctl database update DATABASE_NAME \ +influxctl database update \ --retention-period 30d \ --max-tables 500 \ - --max-columns 250 + --max-columns 250 \ + DATABASE_NAME ``` + {{% /code-placeholders %}} +Replace the following in your command: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/clustered/admin/databases/) + {{% warn %}} #### Database names can't be updated @@ -46,25 +54,28 @@ The `influxctl database update` command uses the database name to identify which database to apply updates to. The database name itself can't be updated. {{% /warn %}} -- [Retention period syntax](#retention-period-syntax) +## Database attributes + +- [Retention period syntax](#retention-period-syntax-influxctl-cli) - [Database naming restrictions](#database-naming-restrictions) - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) - [Table and column limits](#table-and-column-limits) -## Retention period syntax +### Retention period syntax (influxctl CLI) Use the `--retention-period` flag to define a specific [retention period](/influxdb/clustered/admin/databases/#retention-periods) for the database. The retention period value is a time duration value made up of a numeric value -plus a duration unit. For example, `30d` means 30 days. -A zero duration retention period is infinite and data will not expire. +plus a duration unit. +For example, `30d` means 30 days. +A zero duration (for example, `0s` or `0d`) retention period is infinite and data won't expire. The retention period value cannot be negative or contain whitespace. {{< flex >}} -{{% flex-content %}} +{{% flex-content "half" %}} -##### Valid durations units include +#### Valid durations units include - **m**: minute - **h**: hour @@ -74,9 +85,9 @@ The retention period value cannot be negative or contain whitespace. 
- **y**: year {{% /flex-content %}} -{{% flex-content %}} +{{% flex-content "half" %}} -##### Example retention period values +#### Example retention period values - `0d`: infinite/none - `3d`: 3 days @@ -99,7 +110,7 @@ Database names must adhere to the following naming restrictions: - Should not start with an underscore (`_`). - Maximum length of 64 characters. -## InfluxQL DBRP naming convention +### InfluxQL DBRP naming convention In InfluxDB 1.x, data is stored in [databases](/influxdb/v1/concepts/glossary/#database) and [retention policies](/influxdb/v1/concepts/glossary/#retention-policy-rp). @@ -112,11 +123,11 @@ database and retention policy (DBRP) to be queryable with InfluxQL. **When naming a database that you want to query with InfluxQL**, use the following naming convention to automatically map v1 DBRP combinations to a database: -```sh +```text database_name/retention_policy_name ``` -##### Database naming examples +#### Database naming examples | v1 Database name | v1 Retention Policy name | New database name | | :--------------- | :----------------------- | :------------------------ | @@ -124,12 +135,12 @@ database_name/retention_policy_name | telegraf | autogen | telegraf/autogen | | webmetrics | 1w-downsampled | webmetrics/1w-downsampled | -## Table and column limits +### Table and column limits In {{< product-name >}}, table (measurement) and column limits can be configured using the `--max-tables` and `--max-columns` flags. -### Table limit +#### Table limit **Default maximum number of tables**: 500 @@ -172,7 +183,7 @@ operating cost of your cluster. 
{{% /expand %}} {{< /expand-wrapper >}} -### Column limit +#### Column limit **Default maximum number of columns**: 250 From 61acef15272e87b6c77dc26d658ea4dd8b033e78 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 13:24:32 -0500 Subject: [PATCH 12/96] chore(v3): whitespace fix --- .lintstagedrc.mjs | 5 +---- content/influxdb/cloud-dedicated/get-started/write.md | 10 ++++++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index 66a00a12f..c88b4ac20 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -41,10 +41,7 @@ function pytestStagedContent(paths, productPath) { // Instead of the plugin, we could use a placeholder test that always or conditionally passes. // Whether tests pass or fail, the container is removed, // but the CONTENT container and associated volume will remain until the next run. - // Note: the "--network host" setting and `host-open` script are used to - // forward influxctl authentication URLs from the container to the host - // where they can be opened and approved in a host browser. - // Allowing "--network host" has security implications and isn't ideal. + // Note: the docker run -t flag is required to allocate a pseudo-TTY for the container--required for opening influxctl OAuth URLs. `docker run --rm -t \ --label tag=influxdata-docs \ --label stage=test \ diff --git a/content/influxdb/cloud-dedicated/get-started/write.md b/content/influxdb/cloud-dedicated/get-started/write.md index 27e9267ba..ce37dcbcb 100644 --- a/content/influxdb/cloud-dedicated/get-started/write.md +++ b/content/influxdb/cloud-dedicated/get-started/write.md @@ -40,7 +40,8 @@ line protocol for you, but it's good to understand how line protocol works. All data written to InfluxDB is written using **line protocol**, a text-based format that lets you provide the necessary information to write a data point to -InfluxDB. _This tutorial covers the basics of line protocol, but for detailed +InfluxDB. 
+_This tutorial covers the basics of line protocol, but for detailed information, see the [Line protocol reference](/influxdb/cloud-dedicated/reference/syntax/line-protocol/)._ @@ -163,7 +164,8 @@ The following examples show how to write the preceding [sample data](#home-sensor-data-line-protocol), already in line protocol format, to an {{% product-name %}} database. -To learn more about available tools and options, see [Write data](/influxdb/cloud-dedicated/write-data/). +To learn more about available tools and options, see +[Write data](/influxdb/cloud-dedicated/write-data/). {{% note %}} Some examples in this getting started tutorial assume your InfluxDB @@ -243,12 +245,12 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200' If successful, the output is the success message; otherwise, error details and the failure message. - + {{% /tab-content %}} {{% tab-content %}} - + {{% influxdb/custom-timestamps %}} From dbaa790fdbac63883c2d27a038a50ee4b79c4633 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 14:39:26 -0500 Subject: [PATCH 13/96] fix(v3): curl write examples --- .../cloud-dedicated/get-started/write.md | 12 +- .../cloud-serverless/get-started/write.md | 451 +++++++++--------- .../influxdb/clustered/get-started/write.md | 12 +- 3 files changed, 240 insertions(+), 235 deletions(-) diff --git a/content/influxdb/cloud-dedicated/get-started/write.md b/content/influxdb/cloud-dedicated/get-started/write.md index ce37dcbcb..636169281 100644 --- a/content/influxdb/cloud-dedicated/get-started/write.md +++ b/content/influxdb/cloud-dedicated/get-started/write.md @@ -447,7 +447,7 @@ to InfluxDB: {{% code-placeholders "DATABASE_TOKEN" %}} ```sh -response=$(curl --silent --write-out "%{response_code}:%{errormsg}" \ +response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ "https://{{< influxdb/host >}}/write?db=get-started&precision=s" \ --header "Authorization: Bearer DATABASE_TOKEN" \ --header "Content-type: text/plain; 
charset=utf-8" \ @@ -482,8 +482,8 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. -response_code=${response%%:*} -errormsg=${response#*:} +response_code=${response%%:-*} +errormsg=${response#*:-} # Remove leading and trailing whitespace from errormsg errormsg=$(echo "${errormsg}" | tr -d '[:space:]') @@ -559,7 +559,7 @@ to InfluxDB: {{% code-placeholders "DATABASE_TOKEN"%}} ```sh -response=$(curl --silent --write-out "%{response_code}:%{errormsg}" \ +response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ "https://{{< influxdb/host >}}/api/v2/write?bucket=get-started&precision=s" \ --header "Authorization: Bearer DATABASE_TOKEN" \ --header "Content-Type: text/plain; charset=utf-8" \ @@ -594,8 +594,8 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. -response_code=${response%%:*} -errormsg=${response#*:} +response_code=${response%%:-*} +errormsg=${response#*:-} # Remove leading and trailing whitespace from errormsg errormsg=$(echo "${errormsg}" | tr -d '[:space:]') diff --git a/content/influxdb/cloud-serverless/get-started/write.md b/content/influxdb/cloud-serverless/get-started/write.md index e1ac5e4ef..cd358cdb4 100644 --- a/content/influxdb/cloud-serverless/get-started/write.md +++ b/content/influxdb/cloud-serverless/get-started/write.md @@ -19,9 +19,11 @@ related: - /telegraf/v1/ --- -This tutorial walks you through the fundamental of creating **line protocol** data and writing it to InfluxDB. +This tutorial walks you through the fundamental of creating **line protocol** +data and writing it to InfluxDB. 
-InfluxDB provides many different options for ingesting or writing data, including the following: +InfluxDB provides many different options for ingesting or writing data, +including the following: - Influx user interface (UI) - InfluxDB HTTP API (v1 and v2) @@ -30,15 +32,17 @@ InfluxDB provides many different options for ingesting or writing data, includin - InfluxDB client libraries - `influx` CLI -If using tools like Telegraf or InfluxDB client libraries, they can -build the line protocol for you, but it's good to understand how line protocol works. +If using tools like Telegraf or InfluxDB client libraries, they can build the +line protocol for you, but it's good to understand how line protocol works. ## Line protocol All data written to InfluxDB is written using **line protocol**, a text-based -format that lets you provide the necessary information to write a data point to InfluxDB. +format that lets you provide the necessary information to write a data point to +InfluxDB. _This tutorial covers the basics of line protocol, but for detailed information, -see the [Line protocol reference](/influxdb/cloud-serverless/reference/syntax/line-protocol/)._ +see the +[Line protocol reference](/influxdb/cloud-serverless/reference/syntax/line-protocol/)._ ### Line protocol elements @@ -80,7 +84,8 @@ whitespace sensitive. 
--- -_For schema design recommendations, see [InfluxDB schema design](/influxdb/cloud-serverless/write-data/best-practices/schema-design/)._ +_For schema design recommendations, see +[InfluxDB schema design](/influxdb/cloud-serverless/write-data/best-practices/schema-design/)._ ## Construct line protocol @@ -109,32 +114,32 @@ The following line protocol sample represents data collected hourly beginning at ##### Home sensor data line protocol ```text -home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 -home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +home,room=Living\ Room 
temp=21.1,hum=35.9,co=0i 1719924000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719924000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719927600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719927600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719931200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719931200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719934800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719934800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719938400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719938400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719942000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719942000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719945600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719945600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719949200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719949200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719952800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719952800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719956400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719956400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719960000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719960000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719963600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719963600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719967200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719967200 ``` {{% /influxdb/custom-timestamps %}} @@ -210,32 +215,32 @@ The UI confirms that the data has been written successfully. 
influx write \ --bucket get-started \ --precision s " -home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 -home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719924000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719924000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719927600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719927600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719931200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719931200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719934800 +home,room=Kitchen 
temp=22.4,hum=36.0,co=0i 1719934800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719938400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719938400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719942000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719942000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719945600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719945600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719949200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719949200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719952800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719952800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719956400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719956400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719960000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719960000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719963600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719963600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719967200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719967200 " ``` @@ -261,32 +266,32 @@ Use [Telegraf](/telegraf/v1/) to consume line protocol, and then write it to ```sh cat <<- EOF > home.lp - home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 - home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 - home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 - home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 - home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 - home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 - home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 - home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 - home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 - home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 - home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 - home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 - home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 - home,room=Kitchen 
temp=22.8,hum=36.3,co=1i 1641045600 - home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 - home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 - home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 - home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 - home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 - home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 - home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 - home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 - home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 - home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 - home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 - home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 + home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719924000 + home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719924000 + home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719927600 + home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719927600 + home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719931200 + home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719931200 + home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719934800 + home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719934800 + home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719938400 + home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719938400 + home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719942000 + home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719942000 + home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719945600 + home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719945600 + home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719949200 + home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719949200 + home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719952800 + home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719952800 + home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719956400 + home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719956400 + home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719960000 + home,room=Kitchen 
temp=23.3,hum=36.9,co=18i 1719960000 + home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719963600 + home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719963600 + home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719967200 + home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719967200 EOF ``` @@ -434,7 +439,7 @@ InfluxDB creates a bucket named `get-started/autogen` and an {{% code-placeholders "API_TOKEN " %}} ```sh -response=$(curl --silent --write-out "%{response_code}:%{errormsg}" \ +response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ "https://{{< influxdb/host >}}/write?db=get-started&precision=s" \ --header "Authorization: Token API_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ @@ -469,15 +474,15 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. -response_code=${response%%:*} -errormsg=${response#*:} +response_code=${response%%:-*} +errormsg=${response#*:-} # Remove leading and trailing whitespace from errormsg errormsg=$(echo "${errormsg}" | tr -d '[:space:]') echo "$response_code" if [[ $errormsg ]]; then - echo "$errormsg" + echo "$response" fi ``` @@ -536,9 +541,9 @@ to InfluxDB: {{% code-placeholders "API_TOKEN" %}} ```sh -response=$(curl --silent --write-out "%{response_code}:%{errormsg}" \ +response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ "https://{{< influxdb/host >}}/api/v2/write?bucket=get-started&precision=s" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Authorization: Token DATABASE_TOKEN" \ --header "Content-Type: text/plain; charset=utf-8" \ --header "Accept: application/json" \ --data-binary " @@ -571,8 +576,8 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. 
-response_code=${response%%:*} -errormsg=${response#*:} +response_code=${response%%:-*} +errormsg=${response#*:-} # Remove leading and trailing whitespace from errormsg errormsg=$(echo "${errormsg}" | tr -d '[:space:]') @@ -683,32 +688,32 @@ dependencies to your current project. ) lines = [ - "home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000", - "home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000", - "home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600", - "home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600", - "home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200", - "home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200", - "home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800", - "home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800", - "home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400", - "home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400", - "home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000", - "home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000", - "home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600", - "home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600", - "home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200", - "home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200", - "home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800", - "home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800", - "home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400", - "home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400", - "home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000", - "home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000", - "home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600", - "home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600", - "home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200", - "home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200" + "home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719924000", + "home,room=Kitchen 
temp=21.0,hum=35.9,co=0i 1719924000", + "home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719927600", + "home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719927600", + "home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719931200", + "home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719931200", + "home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719934800", + "home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719934800", + "home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719938400", + "home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719938400", + "home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719942000", + "home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719942000", + "home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719945600", + "home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719945600", + "home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719949200", + "home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719949200", + "home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719952800", + "home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719952800", + "home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719956400", + "home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719956400", + "home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719960000", + "home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719960000", + "home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719963600", + "home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719963600", + "home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719967200", + "home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719967200" ] client.write(lines,write_precision='s') @@ -831,32 +836,32 @@ InfluxDB v3 [influxdb3-go client library package](https://github.com/InfluxCommu // to preserve backslashes and prevent interpretation // of escape sequences--for example, escaped spaces in tag values. 
lines := [...]string{ - `home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641124000`, - `home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641124000`, - `home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641127600`, - `home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641127600`, - `home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641131200`, - `home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641131200`, - `home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641134800`, - `home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641134800`, - `home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641138400`, - `home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641138400`, - `home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641142000`, - `home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641142000`, - `home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641145600`, - `home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641145600`, - `home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641149200`, - `home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641149200`, - `home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641152800`, - `home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641152800`, - `home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641156400`, - `home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641156400`, - `home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641160000`, - `home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641160000`, - `home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641163600`, - `home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641163600`, - `home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641167200`, - `home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641167200`, + `home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719124000`, + `home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719124000`, + `home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719127600`, + `home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719127600`, + `home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719131200`, + `home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719131200`, + 
`home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719134800`, + `home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719134800`, + `home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719138400`, + `home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719138400`, + `home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719142000`, + `home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719142000`, + `home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719145600`, + `home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719145600`, + `home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719149200`, + `home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719149200`, + `home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719152800`, + `home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719152800`, + `home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719156400`, + `home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719156400`, + `home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719160000`, + `home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719160000`, + `home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719163600`, + `home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719163600`, + `home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719167200`, + `home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719167200`, } // Iterate over the lines array and write each line @@ -1007,32 +1012,32 @@ the failure message. * Define line protocol records to write. 
*/ const records = [ - `home,room=Living\\ Room temp=21.1,hum=35.9,co=0i 1641124000`, - `home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641124000`, - `home,room=Living\\ Room temp=21.4,hum=35.9,co=0i 1641127600`, - `home,room=Kitchen temp=23.0,hum=36.2,co=0 1641127600`, - `home,room=Living\\ Room temp=21.8,hum=36.0,co=0i 1641131200`, - `home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641131200`, - `home,room=Living\\ Room temp=22.2,hum=36.0,co=0i 1641134800`, - `home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641134800`, - `home,room=Living\\ Room temp=22.2,hum=35.9,co=0i 1641138400`, - `home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641138400`, - `home,room=Living\\ Room temp=22.4,hum=36.0,co=0i 1641142000`, - `home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641142000`, - `home,room=Living\\ Room temp=22.3,hum=36.1,co=0i 1641145600`, - `home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641145600`, - `home,room=Living\\ Room temp=22.3,hum=36.1,co=1i 1641149200`, - `home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641149200`, - `home,room=Living\\ Room temp=22.4,hum=36.0,co=4i 1641152800`, - `home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641152800`, - `home,room=Living\\ Room temp=22.6,hum=35.9,co=5i 1641156400`, - `home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641156400`, - `home,room=Living\\ Room temp=22.8,hum=36.2,co=9i 1641160000`, - `home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641160000`, - `home,room=Living\\ Room temp=22.5,hum=36.3,co=14i 1641163600`, - `home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641163600`, - `home,room=Living\\ Room temp=22.2,hum=36.4,co=17i 1641167200`, - `home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641167200`, + `home,room=Living\\ Room temp=21.1,hum=35.9,co=0i 1719124000`, + `home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719124000`, + `home,room=Living\\ Room temp=21.4,hum=35.9,co=0i 1719127600`, + `home,room=Kitchen temp=23.0,hum=36.2,co=0 1719127600`, + `home,room=Living\\ Room temp=21.8,hum=36.0,co=0i 1719131200`, + `home,room=Kitchen temp=22.7,hum=36.1,co=0i 
1719131200`, + `home,room=Living\\ Room temp=22.2,hum=36.0,co=0i 1719134800`, + `home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719134800`, + `home,room=Living\\ Room temp=22.2,hum=35.9,co=0i 1719138400`, + `home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719138400`, + `home,room=Living\\ Room temp=22.4,hum=36.0,co=0i 1719142000`, + `home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719142000`, + `home,room=Living\\ Room temp=22.3,hum=36.1,co=0i 1719145600`, + `home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719145600`, + `home,room=Living\\ Room temp=22.3,hum=36.1,co=1i 1719149200`, + `home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719149200`, + `home,room=Living\\ Room temp=22.4,hum=36.0,co=4i 1719152800`, + `home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719152800`, + `home,room=Living\\ Room temp=22.6,hum=35.9,co=5i 1719156400`, + `home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719156400`, + `home,room=Living\\ Room temp=22.8,hum=36.2,co=9i 1719160000`, + `home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719160000`, + `home,room=Living\\ Room temp=22.5,hum=36.3,co=14i 1719163600`, + `home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719163600`, + `home,room=Living\\ Room temp=22.2,hum=36.4,co=17i 1719167200`, + `home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719167200`, ]; /** @@ -1204,32 +1209,32 @@ the failure message. * escaped spaces in tag values. 
*/ string[] lines = new string[] { - "home,room=Living\\ Room temp=21.1,hum=35.9,co=0i 1641024000", - "home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000", - "home,room=Living\\ Room temp=21.4,hum=35.9,co=0i 1641027600", - "home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600", - "home,room=Living\\ Room temp=21.8,hum=36.0,co=0i 1641031200", - "home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200", - "home,room=Living\\ Room temp=22.2,hum=36.0,co=0i 1641034800", - "home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800", - "home,room=Living\\ Room temp=22.2,hum=35.9,co=0i 1641038400", - "home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400", - "home,room=Living\\ Room temp=22.4,hum=36.0,co=0i 1641042000", - "home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000", - "home,room=Living\\ Room temp=22.3,hum=36.1,co=0i 1641045600", - "home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600", - "home,room=Living\\ Room temp=22.3,hum=36.1,co=1i 1641049200", - "home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200", - "home,room=Living\\ Room temp=22.4,hum=36.0,co=4i 1641052800", - "home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800", - "home,room=Living\\ Room temp=22.6,hum=35.9,co=5i 1641056400", - "home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400", - "home,room=Living\\ Room temp=22.8,hum=36.2,co=9i 1641060000", - "home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000", - "home,room=Living\\ Room temp=22.5,hum=36.3,co=14i 1641063600", - "home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600", - "home,room=Living\\ Room temp=22.2,hum=36.4,co=17i 1641067200", - "home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200" + "home,room=Living\\ Room temp=21.1,hum=35.9,co=0i 1719924000", + "home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719924000", + "home,room=Living\\ Room temp=21.4,hum=35.9,co=0i 1719927600", + "home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719927600", + "home,room=Living\\ Room temp=21.8,hum=36.0,co=0i 1719931200", + "home,room=Kitchen 
temp=22.7,hum=36.1,co=0i 1719931200", + "home,room=Living\\ Room temp=22.2,hum=36.0,co=0i 1719934800", + "home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719934800", + "home,room=Living\\ Room temp=22.2,hum=35.9,co=0i 1719938400", + "home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719938400", + "home,room=Living\\ Room temp=22.4,hum=36.0,co=0i 1719942000", + "home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719942000", + "home,room=Living\\ Room temp=22.3,hum=36.1,co=0i 1719945600", + "home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719945600", + "home,room=Living\\ Room temp=22.3,hum=36.1,co=1i 1719949200", + "home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719949200", + "home,room=Living\\ Room temp=22.4,hum=36.0,co=4i 1719952800", + "home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719952800", + "home,room=Living\\ Room temp=22.6,hum=35.9,co=5i 1719956400", + "home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719956400", + "home,room=Living\\ Room temp=22.8,hum=36.2,co=9i 1719960000", + "home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719960000", + "home,room=Living\\ Room temp=22.5,hum=36.3,co=14i 1719963600", + "home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719963600", + "home,room=Living\\ Room temp=22.2,hum=36.4,co=17i 1719967200", + "home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719967200" }; // Write each record separately. @@ -1408,32 +1413,32 @@ _The tutorial assumes using Maven version 3.9 and Java version >= 15._ token, database)) { // Create a list of line protocol records. 
final List records = List.of( - "home,room=Living\\ Room temp=21.1,hum=35.9,co=0i 1641024000", - "home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000", - "home,room=Living\\ Room temp=21.4,hum=35.9,co=0i 1641027600", - "home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600", - "home,room=Living\\ Room temp=21.8,hum=36.0,co=0i 1641031200", - "home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200", - "home,room=Living\\ Room temp=22.2,hum=36.0,co=0i 1641034800", - "home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800", - "home,room=Living\\ Room temp=22.2,hum=35.9,co=0i 1641038400", - "home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400", - "home,room=Living\\ Room temp=22.4,hum=36.0,co=0i 1641042000", - "home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000", - "home,room=Living\\ Room temp=22.3,hum=36.1,co=0i 1641045600", - "home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600", - "home,room=Living\\ Room temp=22.3,hum=36.1,co=1i 1641049200", - "home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200", - "home,room=Living\\ Room temp=22.4,hum=36.0,co=4i 1641052800", - "home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800", - "home,room=Living\\ Room temp=22.6,hum=35.9,co=5i 1641056400", - "home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400", - "home,room=Living\\ Room temp=22.8,hum=36.2,co=9i 1641060000", - "home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000", - "home,room=Living\\ Room temp=22.5,hum=36.3,co=14i 1641063600", - "home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600", - "home,room=Living\\ Room temp=22.2,hum=36.4,co=17i 1641067200", - "home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200" + "home,room=Living\\ Room temp=21.1,hum=35.9,co=0i 1719924000", + "home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719924000", + "home,room=Living\\ Room temp=21.4,hum=35.9,co=0i 1719927600", + "home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719927600", + "home,room=Living\\ Room temp=21.8,hum=36.0,co=0i 1719931200", + "home,room=Kitchen 
temp=22.7,hum=36.1,co=0i 1719931200", + "home,room=Living\\ Room temp=22.2,hum=36.0,co=0i 1719934800", + "home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719934800", + "home,room=Living\\ Room temp=22.2,hum=35.9,co=0i 1719938400", + "home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719938400", + "home,room=Living\\ Room temp=22.4,hum=36.0,co=0i 1719942000", + "home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719942000", + "home,room=Living\\ Room temp=22.3,hum=36.1,co=0i 1719945600", + "home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719945600", + "home,room=Living\\ Room temp=22.3,hum=36.1,co=1i 1719949200", + "home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719949200", + "home,room=Living\\ Room temp=22.4,hum=36.0,co=4i 1719952800", + "home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719952800", + "home,room=Living\\ Room temp=22.6,hum=35.9,co=5i 1719956400", + "home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719956400", + "home,room=Living\\ Room temp=22.8,hum=36.2,co=9i 1719960000", + "home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719960000", + "home,room=Living\\ Room temp=22.5,hum=36.3,co=14i 1719963600", + "home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719963600", + "home,room=Living\\ Room temp=22.2,hum=36.4,co=17i 1719967200", + "home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719967200" ); /** diff --git a/content/influxdb/clustered/get-started/write.md b/content/influxdb/clustered/get-started/write.md index 62de9e0d4..7c6b31ef7 100644 --- a/content/influxdb/clustered/get-started/write.md +++ b/content/influxdb/clustered/get-started/write.md @@ -439,7 +439,7 @@ to InfluxDB: {{% code-placeholders "DATABASE_TOKEN" %}} ```sh -response=$(curl --silent --write-out "%{response_code}:%{errormsg}" \ +response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ "https://{{< influxdb/host >}}/write?db=get-started&precision=s" \ --header "Authorization: Bearer DATABASE_TOKEN" \ --header "Content-type: text/plain; charset=utf-8" \ @@ -474,8 +474,8 @@ home,room=Kitchen 
temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. -response_code=${response%%:*} -errormsg=${response#*:} +response_code=${response%%:-*} +errormsg=${response#*:-} # Remove leading and trailing whitespace from errormsg errormsg=$(echo "${errormsg}" | tr -d '[:space:]') @@ -550,7 +550,7 @@ to InfluxDB: {{% code-placeholders "DATABASE_TOKEN"%}} ```sh -response=$(curl --silent --write-out "%{response_code}:%{errormsg}" \ +response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ "https://{{< influxdb/host >}}/api/v2/write?bucket=get-started&precision=s" \ --header "Authorization: Bearer DATABASE_TOKEN" \ --header "Content-Type: text/plain; charset=utf-8" \ @@ -585,8 +585,8 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. -response_code=${response%%:*} -errormsg=${response#*:} +response_code=${response%%:-*} +errormsg=${response#*:-} # Remove leading and trailing whitespace from errormsg errormsg=$(echo "${errormsg}" | tr -d '[:space:]') From 9e5f346636d344e2224b1f763820370450339006 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 14:40:13 -0500 Subject: [PATCH 14/96] chore(dedicated): cleanup get-started/query --- .../cloud-dedicated/get-started/query.md | 100 ++++++++++-------- 1 file changed, 58 insertions(+), 42 deletions(-) diff --git a/content/influxdb/cloud-dedicated/get-started/query.md b/content/influxdb/cloud-dedicated/get-started/query.md index 522dced59..e87a7001a 100644 --- a/content/influxdb/cloud-dedicated/get-started/query.md +++ b/content/influxdb/cloud-dedicated/get-started/query.md @@ -35,8 +35,10 @@ the simplicity of SQL. 
{{% note %}} The examples in this section of the tutorial query the -[**get-started** database](/influxdb/cloud-dedicated/get-started/setup/#create-a-database) for data written in the -[Get started writing data](/influxdb/cloud-dedicated/get-started/write/#write-line-protocol-to-influxdb) section. +[**get-started** database](/influxdb/cloud-dedicated/get-started/setup/#create-a-database) +for data written in the +[Get started writing data](/influxdb/cloud-dedicated/get-started/write/#write-line-protocol-to-influxdb) +section. {{% /note %}} ## Tools to execute queries @@ -204,7 +206,7 @@ WHERE {{% note %}} Some examples in this getting started tutorial assume your InfluxDB -credentials (**URL**, and **token**) are provided by +credentials (**URL** and **token**) are provided by [environment variables](/influxdb/cloud-dedicated/get-started/setup/?t=InfluxDB+API#configure-authentication-credentials). {{% /note %}} @@ -233,18 +235,20 @@ Provide the following: {{% influxdb/custom-timestamps %}} {{% code-placeholders "get-started" %}} + ```sh influxctl query \ --database get-started \ --token $INFLUX_TOKEN \ "SELECT - * -FROM - home -WHERE - time >= '2022-01-01T08:00:00Z' - AND time <= '2022-01-01T20:00:00Z'" + * + FROM + home + WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T20:00:00Z'" ``` + {{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} @@ -308,12 +312,12 @@ _If your project's virtual environment is already running, skip to step 3._ - ```sh - influx3 sql "SELECT * + ```sh + influx3 sql "SELECT * FROM home WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T20:00:00Z'" - ``` + ``` `influx3` displays query results in your terminal. @@ -348,7 +352,7 @@ _If your project's virtual environment is already running, skip to step 3._ python -m venv envs/virtual-env && . ./envs/virtual-env/bin/activate ``` - 3. Install the following dependencies: + 2. 
Install the following dependencies: {{< req type="key" text="Already installed in the [Write data section](/influxdb/cloud-dedicated/get-started/write/?t=Python#write-line-protocol-to-influxdb)" color="magenta" >}} @@ -364,7 +368,7 @@ _If your project's virtual environment is already running, skip to step 3._ pip install influxdb3-python pandas tabulate ``` - 4. In your terminal or editor, create a new file for your code--for example: `query.py`. + 3. In your terminal or editor, create a new file for your code--for example: `query.py`. 2. In `query.py`, enter the following sample code: @@ -403,10 +407,15 @@ _If your project's virtual environment is already running, skip to step 3._ {{< expand-wrapper >}} {{% expand "Important: If using **Windows**, specify the **Windows** certificate path" %}} - When instantiating the client, Python looks for SSL/TLS certificate authority (CA) certificates for verifying the server's authenticity. - If using a non-POSIX-compliant operating system (such as Windows), you need to specify a certificate bundle path that Python can access on your system. + When instantiating the client, Python looks for SSL/TLS certificate authority + (CA) certificates for verifying the server's authenticity. + If using a non-POSIX-compliant operating system (such as Windows), you need to + specify a certificate bundle path that Python can access on your system. - The following example shows how to use the [Python `certifi` package](https://certifiio.readthedocs.io/en/latest/) and client library options to provide a bundle of trusted certificates to the Python Flight client: + The following example shows how to use the + [Python `certifi` package](https://certifiio.readthedocs.io/en/latest/) and + client library options to provide a bundle of trusted certificates to the + Python Flight client: 1. In your terminal, install the Python `certifi` package. @@ -445,29 +454,31 @@ _If your project's virtual environment is already running, skip to step 3._ 2. 
Calls the `InfluxDBClient3()` constructor method with credentials to instantiate an InfluxDB `client` with the following credentials: - - **`host`**: {{% product-name omit=" Clustered" %}} cluster URL (without `https://` protocol or trailing slash) + - **`host`**: {{% product-name omit=" Clustered" %}} cluster URL + (without `https://` protocol or trailing slash) - **`token`**: a [database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens) with read access to the specified database. - _Store this in a secret store or environment variable to avoid exposing the raw token string._ + _Store this in a secret store or environment variable to avoid exposing + the raw token string._ - **`database`**: the name of the {{% product-name %}} database to query - 3. Defines the SQL query to execute and assigns it to a `query` variable. + 1. Defines the SQL query to execute and assigns it to a `query` variable. - 4. Calls the `client.query()` method with the SQL query. + 2. Calls the `client.query()` method with the SQL query. `query()` sends a Flight request to InfluxDB, queries the database, retrieves result data from the endpoint, and then returns a [`pyarrow.Table`](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table) assigned to the `table` variable. - 5. Calls the [`to_pandas()` method](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.to_pandas) + 3. Calls the [`to_pandas()` method](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.to_pandas) to convert the Arrow table to a [`pandas.DataFrame`](https://arrow.apache.org/docs/python/pandas.html). - 6. Calls the [`pandas.DataFrame.to_markdown()` method](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_markdown.html) + 4. Calls the [`pandas.DataFrame.to_markdown()` method](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_markdown.html) to convert the DataFrame to a markdown table. - 7. 
Calls the `print()` method to print the markdown table to stdout. + 5. Calls the `print()` method to print the markdown table to stdout. -2. Enter the following command to run the program and query your {{% product-name omit=" Clustered" %}} cluster: +1. Enter the following command to run the program and query your {{% product-name omit=" Clustered" %}} cluster: @@ -606,20 +617,25 @@ _If your project's virtual environment is already running, skip to step 3._ 2. Defines a `Query()` function that does the following: - 1. Instantiates `influx.Client` with InfluxDB credentials. + 1. Instantiates `influx.Client` with the following parameters for InfluxDB credentials: - **`Host`**: your {{% product-name omit=" Clustered" %}} cluster URL - - **`Database`**: The name of your {{% product-name %}} database + - **`Database`**: the name of your {{% product-name %}} database - **`Token`**: a [database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens) with read permission on the specified database. - _Store this in a secret store or environment variable to avoid exposing the raw token string._ + _Store this in a secret store or environment variable to avoid + exposing the raw token string._ 2. Defines a deferred function to close the client after execution. 3. Defines a string variable for the SQL query. - 4. Calls the `influxdb3.Client.Query(sql string)` method and passes the SQL string to query InfluxDB. - `Query(sql string)` method returns an `iterator` for data in the response stream. - 5. Iterates over rows, formats the timestamp as an [RFC3339 timestamp](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp), and prints the data in table format to stdout. + 4. Calls the `influxdb3.Client.Query(sql string)` method and passes the + SQL string to query InfluxDB. + The `Query(sql string)` method returns an `iterator` for data in the + response stream. + 5. 
Iterates over rows, formats the timestamp as an + [RFC3339 timestamp](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp), + and prints the data in table format to stdout. 3. In your editor, open the `main.go` file you created in the [Write data section](/influxdb/cloud-dedicated/get-started/write/?t=Go#write-line-protocol-to-influxdb) and insert code to call the `Query()` function--for example: @@ -633,12 +649,13 @@ _If your project's virtual environment is already running, skip to step 3._ } ``` -4. In your terminal, enter the following command to install the necessary packages, build the module, and run the program: +4. In your terminal, enter the following command to install the necessary + packages, build the module, and run the program: ```sh - go mod tidy && go build && go run influxdb_go_client + go mod tidy && go run influxdb_go_client ``` The program executes the `main()` function that writes the data and prints the query results to the console. @@ -724,18 +741,19 @@ _This tutorial assumes you installed Node.js and npm, and created an `influxdb_j - **`host`**: your {{% product-name omit=" Clustered" %}} cluster URL - **`token`**: a [database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens) with read permission on the database you want to query. - _Store this in a secret store or environment variable to avoid exposing the raw token string._ + _Store this in a secret store or environment variable to avoid exposing + the raw token string._ - 3. Defines a string variable (`sql`) for the SQL query. - 4. Defines an object (`data`) with column names for keys and array values for storing row data. - 5. Calls the `InfluxDBClient.query()` method with the following arguments: + 1. Defines a string variable (`sql`) for the SQL query. + 2. Defines an object (`data`) with column names for keys and array values for storing row data. + 3. 
Calls the `InfluxDBClient.query()` method with the following arguments: - **`sql`**: the query to execute - **`database`**: the name of the {{% product-name %}} database to query `query()` returns a stream of row vectors. - 6. Iterates over rows and adds the column data to the arrays in `data`. - 7. Passes `data` to the Arrow `tableFromArrays()` function to format the arrays as a table, and then passes the result to the `console.table()` method to output a highlighted table in the terminal. + 4. Iterates over rows and adds the column data to the arrays in `data`. + 5. Passes `data` to the Arrow `tableFromArrays()` function to format the arrays as a table, and then passes the result to the `console.table()` method to output a highlighted table in the terminal. 5. Inside of `index.mjs` (created in the [Write data section](/influxdb/cloud-dedicated/get-started/write/?t=Nodejs)), enter the following sample code to import the modules and call the functions: ```js @@ -756,7 +774,7 @@ _This tutorial assumes you installed Node.js and npm, and created an `influxdb_j main(); ``` -9. In your terminal, execute `index.mjs` to write to and query {{% product-name %}}: +6. In your terminal, execute `index.mjs` to write to and query {{% product-name %}}: @@ -1029,8 +1047,6 @@ _This tutorial assumes using Maven version 3.9, Java version >= 15, and an `infl **Linux/MacOS** - - ```sh export MAVEN_OPTS="--add-opens=java.base/java.nio=ALL-UNNAMED" ``` From 8f0e583cdc6e16023149bc7019de50e5ce7bd0d6 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Jul 2024 22:21:05 -0500 Subject: [PATCH 15/96] fix(clustered): Clustered Telegraf config and example: - Mount a volume to store test project files generated in tests. 
- Allow URL and `url` in the linter - Set timestamp precision for Telegraf example - Fix Telegraf test, use Python to edit the TOML config file --- .ci/vale/styles/InfluxDataDocs/WordList.yml | 1 - .../vocabularies/InfluxDataDocs/accept.txt | 2 + .lintstagedrc.mjs | 8 + .../influxdb/clustered/get-started/write.md | 264 ++++++++++-------- test/src/requirements.txt | 1 + 5 files changed, 154 insertions(+), 122 deletions(-) diff --git a/.ci/vale/styles/InfluxDataDocs/WordList.yml b/.ci/vale/styles/InfluxDataDocs/WordList.yml index 59ab8922c..5582bd6ac 100644 --- a/.ci/vale/styles/InfluxDataDocs/WordList.yml +++ b/.ci/vale/styles/InfluxDataDocs/WordList.yml @@ -82,6 +82,5 @@ swap: tablename: table name tablet: device touch: tap - url: URL vs\.: versus World Wide Web: web diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt index 5eb80d548..2d1e152a6 100644 --- a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -72,6 +72,8 @@ quoteChar retentionRules sourceBucket tagKey +`url[s]?` +URL v2 v3 venv diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index c88b4ac20..31c075721 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -34,6 +34,13 @@ function pytestStagedContent(paths, productPath) { `docker build . -f Dockerfile.pytest -t influxdata-docs/pytest:latest`, + + + // Create a Docker volume for temporary files generated during testing + `sh -c "docker volume create \ + --label tag=influxdata-docs \ + --label stage=test \ + --name test-tmp || true"`, // Run test runners. 
// Uses a pytest plugin to suppress exit code 5 (if no tests are found), @@ -49,6 +56,7 @@ function pytestStagedContent(paths, productPath) { --env-file ${productPath}/.env.test \ --volumes-from ${CONTENT} \ --mount type=bind,src=./test/shared,dst=/shared \ + --mount type=volume,source=test-tmp,target=/app/iot-starter \ influxdata-docs/pytest --codeblocks --suppress-no-test-exit-code --exitfirst ${productPath}/`, ]; } diff --git a/content/influxdb/clustered/get-started/write.md b/content/influxdb/clustered/get-started/write.md index 7c6b31ef7..fa3150f9b 100644 --- a/content/influxdb/clustered/get-started/write.md +++ b/content/influxdb/clustered/get-started/write.md @@ -204,32 +204,32 @@ influxctl write \ --database get-started \ --token $INFLUX_TOKEN \ --precision s \ - 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 -home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 
-home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200' + 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719734400 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719734400 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719738000 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719738000 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719741600 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719741600 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719745200 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719745200 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719748800 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719748800 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719752400 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719752400 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719756000 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719756000 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719759600 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719759600 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719763200 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719763200 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719766800 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719766800 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719770400 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719770400 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719774000 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719774000 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719777600 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719777600' ``` {{% /code-placeholders %}} @@ -258,47 +258,48 @@ and then write it to {{< product-name >}}. 
```sh cat <<- EOF > home.lp - home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 - home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 - home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 - home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 - home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 - home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 - home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 - home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 - home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 - home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 - home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 - home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 - home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 - home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 - home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 - home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 - home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 - home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 - home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 - home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 - home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 - home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 - home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 - home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 - home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 - home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 + home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719820800 + home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719820800 + home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719824400 + home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719824400 + home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719828000 + home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719828000 + home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719831600 + home,room=Kitchen 
temp=22.4,hum=36.0,co=0i 1719831600 + home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719835200 + home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719835200 + home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719838800 + home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719838800 + home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719842400 + home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719842400 + home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719846000 + home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719846000 + home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719849600 + home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719849600 + home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719853200 + home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719853200 + home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719856800 + home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719856800 + home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719860400 + home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719860400 + home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719864000 + home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719864000 EOF ``` 3. Run the following command to generate a Telegraf configuration file - (`./telegraf.conf`) that enables the `inputs.file` and `outputs.influxdb_v2` + (`telegraf.conf`) that enables the `inputs.file` and `outputs.influxdb_v2` plugins: ```sh - telegraf --sample-config \ + mkdir -p iot-project \ + && telegraf --sample-config \ --input-filter file \ --output-filter influxdb_v2 \ - > telegraf.conf + > iot-project/telegraf.conf ``` -4. In your editor, open `./telegraf.conf` and configure the following: +4. In your editor, open `iot-project/telegraf.conf` and configure the following: - **`file` input plugin**: In the `[[inputs.file]].files` list, replace `"/tmp/metrics.out"` with your sample data filename. If Telegraf can't @@ -309,12 +310,28 @@ and then write it to {{< product-name >}}. ## Files to parse each interval. 
Accept standard unix glob matching rules, ## as well as ** to match recursive files and directories. files = ["home.lp"] + # Set the timestamp precision to the precision in your data. + influx_timestamp_precision = '1s' + ## Optionally, use the newer, more efficient line protocol parser + influx_parser_type = 'upstream' ``` @@ -338,15 +355,20 @@ and then write it to {{< product-name >}}. ``` @@ -367,7 +389,7 @@ and then write it to {{< product-name >}}. Enter the following command in your terminal: ```sh - telegraf --once --config ./telegraf.conf + telegraf --once --config iot-project/telegraf.conf ``` If the write is successful, the output is similar to the following: @@ -445,32 +467,32 @@ response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ --header "Content-type: text/plain; charset=utf-8" \ --header "Accept: application/json" \ --data-binary " -home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 -home,room=Kitchen 
temp=22.7,hum=36.0,co=9i 1641056400 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719734400 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719734400 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719738000 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719738000 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719741600 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719741600 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719745200 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719745200 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719748800 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719748800 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719752400 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719752400 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719756000 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719756000 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719759600 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719759600 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719763200 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719763200 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719766800 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719766800 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719770400 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719770400 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719774000 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719774000 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719777600 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719777600 ") # Format the response code and error message output. 
@@ -556,32 +578,32 @@ response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ --header "Content-Type: text/plain; charset=utf-8" \ --header "Accept: application/json" \ --data-binary " -home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 -home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719734400 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719734400 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719738000 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719738000 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719741600 
+home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719741600 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719745200 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719745200 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719748800 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719748800 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719752400 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719752400 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719756000 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719756000 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719759600 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719759600 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719763200 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719763200 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719766800 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719766800 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719770400 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719770400 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719774000 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719774000 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719777600 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719777600 ") # Format the response code and error message output. diff --git a/test/src/requirements.txt b/test/src/requirements.txt index 7d366d868..788173c44 100644 --- a/test/src/requirements.txt +++ b/test/src/requirements.txt @@ -7,6 +7,7 @@ pytest-dotenv>=0.5.2 # Allow pytest to pass if no tests (i.e. testable code blocks) are found. 
pytest-custom-exit-code>=0.3.0 requests>=2.26.0 +toml # Code sample dependencies influxdb3-python @ git+https://github.com/InfluxCommunity/influxdb3-python@v0.5.0 influxdb3-python-cli @ git+https://github.com/InfluxCommunity/influxdb3-python-cli@main From bb64ca32bba7d18d3d7ce32624ff8659674834a0 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Jul 2024 09:38:20 -0500 Subject: [PATCH 16/96] fix(v3): duplicate word, test updates --- .lintstagedrc.mjs | 18 ++- CONTRIBUTING.md | 7 +- .../query-data/influxql/basic-query.md | 111 ++++++++++------- .../query-data/influxql/basic-query.md | 113 +++++++++++------- test/src/prepare-content.sh | 1 + 5 files changed, 158 insertions(+), 92 deletions(-) diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index 31c075721..fd6794abc 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -48,16 +48,14 @@ function pytestStagedContent(paths, productPath) { // Instead of the plugin, we could use a placeholder test that always or conditionally passes. // Whether tests pass or fail, the container is removed, // but the CONTENT container and associated volume will remain until the next run. - // Note: the docker run -t flag is required to allocate a pseudo-TTY for the container--required for opening influxctl OAuth URLs. - `docker run --rm -t \ - --label tag=influxdata-docs \ - --label stage=test \ - --name ${TEST} \ - --env-file ${productPath}/.env.test \ - --volumes-from ${CONTENT} \ - --mount type=bind,src=./test/shared,dst=/shared \ - --mount type=volume,source=test-tmp,target=/app/iot-starter \ - influxdata-docs/pytest --codeblocks --suppress-no-test-exit-code --exitfirst ${productPath}/`, + // Note: TTY is required for the container to open influxctl OAuth URLs in the host browser. 
+ `docker run --tty=true --label tag=influxdata-docs --label stage=test \ + --name ${TEST} \ + --env-file ${productPath}/.env.test \ + --volumes-from ${CONTENT} \ + --mount type=bind,src=./test/shared,dst=/shared \ + --mount type=volume,source=test-tmp,target=/app/iot-starter \ + influxdata-docs/pytest --codeblocks --suppress-no-test-exit-code --exitfirst ${productPath}/`, ]; } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6b93ebb0a..031707ba1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,12 +77,17 @@ scripts configured in `.husky/pre-commit`, including linting and tests for your **We strongly recommend running linting and tests**, but you can skip them (and avoid installing dependencies) -by including the `--no-verify` flag with your commit--for example, enter the following command in your terminal: +by including the `HUSKY=0` environment variable or the `--no-verify` flag with +your commit--for example: ```sh git commit -m "" --no-verify ``` +```sh +HUSKY=0 git commit +``` + For more options, see the [Husky documentation](https://typicode.github.io/husky/how-to.html#skipping-git-hooks). ### Set up test scripts and credentials diff --git a/content/influxdb/cloud-dedicated/query-data/influxql/basic-query.md b/content/influxdb/cloud-dedicated/query-data/influxql/basic-query.md index 1b6a1b779..13f1611aa 100644 --- a/content/influxdb/cloud-dedicated/query-data/influxql/basic-query.md +++ b/content/influxdb/cloud-dedicated/query-data/influxql/basic-query.md @@ -25,20 +25,29 @@ following clauses: {{< req type="key" >}} -- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to return from a - measurement or use the wildcard alias (`*`) to select all fields and tags - from a measurement. It requires at least one - [field key](/influxdb/cloud-dedicated/reference/glossary/#field-key) or the wildcard alias (`*`). 
- For more information, see [Notable SELECT statement behaviors](/influxdb/cloud-dedicated/reference/influxql/select/#notable-select-statement-behaviors). -- {{< req "\*">}} `FROM`: Specify the [measurement](/influxdb/cloud-dedicated/reference/glossary/#measurement) to query from. -It requires one or more comma-delimited [measurement expressions](/influxdb/cloud-dedicated/reference/influxql/select/#measurement_expression). +- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to return + from a [table](/influxdb/cloud-dedicated/reference/glossary/#table) or use the + wildcard alias (`*`) to select all fields and tags from a table. It requires + at least one + [field key](/influxdb/cloud-dedicated/reference/glossary/#field-key) or the + wildcard alias (`*`). For more information, see + [Notable SELECT statement behaviors](/influxdb/cloud-dedicated/reference/influxql/select/#notable-select-statement-behaviors). +- {{< req "\*">}} `FROM`: Specify the + [table](/influxdb/cloud-dedicated/reference/glossary/#table) to query from. + + It requires one or more comma-delimited + [measurement expressions](/influxdb/cloud-dedicated/reference/influxql/select/#measurement_expression). + - `WHERE`: Filter data based on -[field values](/influxdb/cloud-dedicated/reference/glossary/#field), -[tag values](/influxdb/cloud-dedicated/reference/glossary/#tag), or -[timestamps](/influxdb/cloud-dedicated/reference/glossary/#timestamp). Only return data that meets the specified conditions--for example, falls within - a time range, contains specific tag values, or contains a field value outside a specified range. + [field values](/influxdb/cloud-dedicated/reference/glossary/#field), + [tag values](/influxdb/cloud-dedicated/reference/glossary/#tag), or + [timestamps](/influxdb/cloud-dedicated/reference/glossary/#timestamp). 
Only + return data that meets the specified conditions--for example, falls within a + time range, contains specific tag values, or contains a field value outside a + specified range. {{% influxdb/custom-timestamps %}} + ```sql SELECT temp, @@ -49,21 +58,28 @@ WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T20:00:00Z' ``` + {{% /influxdb/custom-timestamps %}} ## Result set -If at least one row satisfies the query, {{% product-name %}} returns row data in the query result set. -If a query uses a `GROUP BY` clause, the result set includes the following: +If at least one row satisfies the query, {{% product-name %}} returns row data +in the query result set. +If a query uses a `GROUP BY` clause, the result set +includes the following: - Columns listed in the query's `SELECT` clause - A `time` column that contains the timestamp for the record or the group -- An `iox::measurement` column that contains the record's measurement (table) name -- Columns listed in the query's `GROUP BY` clause; each row in the result set contains the values used for grouping +- An `iox::measurement` column that contains the record's + [table](/influxdb/cloud-dedicated/reference/glossary/#table) name +- Columns listed in the query's `GROUP BY` clause; each row in the result set + contains the values used for grouping ### GROUP BY result columns -If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then groups are based on the [default time range](/influxdb/cloud-dedicated/reference/influxql/group-by/#default-time-range). +If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then +groups are based on the +[default time range](/influxdb/cloud-dedicated/reference/influxql/group-by/#default-time-range). 
## Basic query examples @@ -75,9 +91,10 @@ If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then g - [Alias queried fields and tags](#alias-queried-fields-and-tags) {{% note %}} + #### Sample data -The following examples use the +The following examples use the [Get started home sensor data](/influxdb/cloud-dedicated/reference/sample-data/#get-started-home-sensor-data). To run the example queries and return results, [write the sample data](/influxdb/cloud-dedicated/reference/sample-data/#write-the-home-sensor-data-to-influxdb) @@ -89,12 +106,14 @@ to your {{% product-name %}} database before running the example queries. - Use the `SELECT` clause to specify what tags and fields to return. Specify at least one field key. To return all tags and fields, use the wildcard alias (`*`). -- Specify the measurement to query in the `FROM` clause. -- Specify time boundaries in the `WHERE` clause. - Include time-based predicates that compare the value of the `time` column to a timestamp. +- Specify the [table](/influxdb/cloud-dedicated/reference/glossary/#table) to + query in the `FROM` clause. +- Specify time boundaries in the `WHERE` clause. Include time-based predicates + that compare the value of the `time` column to a timestamp. Use the `AND` logical operator to chain multiple predicates together. {{% influxdb/custom-timestamps %}} + ```sql SELECT * FROM home @@ -102,13 +121,13 @@ WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T12:00:00Z' ``` + {{% /influxdb/custom-timestamps %}} Query time boundaries can be relative or absolute. {{< expand-wrapper >}} {{% expand "Query with relative time boundaries" %}} - To query data from relative time boundaries, compare the value of the `time` column to a timestamp calculated by subtracting an interval from a timestamp. Use `now()` to return the timestamp for the current time (UTC). @@ -119,7 +138,7 @@ Use `now()` to return the timestamp for the current time (UTC). 
SELECT * FROM home WHERE time >= now() - 30d ``` -##### Query one day of data data from a week ago +##### Query one day of data from a week ago ```sql SELECT * @@ -128,16 +147,18 @@ WHERE time >= now() - 7d AND time <= now() - 6d ``` + {{% /expand %}} {{% expand "Query with absolute time boundaries" %}} -To query data from absolute time boundaries, compare the value of the `time` column -to a timestamp literal. -Use the `AND` logical operator to chain together multiple predicates and define -both start and stop boundaries for the query. +To query data from absolute time boundaries, compare the value of the `time` +column to a timestamp literal. +Use the `AND` logical operator to chain together +multiple predicates and define both start and stop boundaries for the query. {{% influxdb/custom-timestamps %}} + ```sql SELECT * @@ -147,6 +168,7 @@ WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T20:00:00Z' ``` + {{% /influxdb/custom-timestamps %}} {{% /expand %}} @@ -156,8 +178,8 @@ WHERE To query data without time boundaries, do not include any time-based predicates in your `WHERE` clause. -If a time range is not defined in the `WHERE` clause, the default time range is -the Unix epoch (`1970-01-01T00:00:00Z`) to _now_. +If a time range is not defined in the `WHERE` clause, +the default time range is the Unix epoch (`1970-01-01T00:00:00Z`) to _now_. {{% warn %}} Querying data _without time bounds_ can return an unexpected amount of data. @@ -172,8 +194,8 @@ SELECT * FROM home To query specific fields, include them in the `SELECT` clause. If querying multiple fields or tags, comma-delimit each. -If a field or tag key includes special characters or spaces or is case-sensitive, -wrap the key in _double-quotes_. +If a field or tag key includes special characters or spaces or is +case-sensitive, wrap the key in _double-quotes_. 
```sql SELECT time, room, temp, hum FROM home @@ -181,10 +203,12 @@ SELECT time, room, temp, hum FROM home ### Query fields based on tag values -- In the `SELECT` clause, include fields you want to query and tags you want to base conditions on. -- In the `WHERE` clause, include predicates that compare the tag identifier to a string literal. - Use [logical operators](/influxdb/cloud-dedicated/reference/influxql/where/#logical-operators) to chain multiple predicates together and apply - multiple conditions. +- In the `SELECT` clause, include fields you want to query and tags you want to + base conditions on. +- In the `WHERE` clause, include predicates that compare the tag identifier to a + string literal. Use + [logical operators](/influxdb/cloud-dedicated/reference/influxql/where/#logical-operators) + to chain multiple predicates together and apply multiple conditions. ```sql SELECT * FROM home WHERE room = 'Kitchen' @@ -193,9 +217,12 @@ SELECT * FROM home WHERE room = 'Kitchen' ### Query points based on field values - In the `SELECT` clause, include fields you want to query. -- In the `WHERE` clause, include predicates that compare the field identifier to a value or expression. - Use [logical operators](/influxdb/cloud-dedicated/reference/influxql/where/#logical-operators) (`AND`, `OR`) to chain multiple predicates together - and apply multiple conditions. +- In the `WHERE` clause, include predicates that compare the field identifier to + a value or expression. + Use + [logical operators](/influxdb/cloud-dedicated/reference/influxql/where/#logical-operators) + (`AND`, `OR`) to chain multiple predicates together and apply multiple + conditions. ```sql SELECT co, time FROM home WHERE co >= 10 OR co <= -10 @@ -204,13 +231,17 @@ SELECT co, time FROM home WHERE co >= 10 OR co <= -10 ### Alias queried fields and tags To alias or rename fields and tags that you query, use the `AS` clause. 
-After the tag, field, or expression you want to alias, pass `AS` followed by the alias name as an identifier (wrap in double quotes (`"`) if the alias includes spaces or special characters)--for example: +After the tag, field, or expression you want to alias, pass `AS` followed by the +alias name as an identifier (wrap in double quotes (`"`) if the alias includes +spaces or special characters)--for example: ```sql SELECT temp AS temperature, hum AS "humidity (%)" FROM home ``` {{% note %}} -When aliasing columns in **InfluxQL**, use the `AS` clause and an [identifier](/influxdb/cloud-dedicated/reference/influxql/#identifiers). -When [aliasing columns in **SQL**](/influxdb/cloud-dedicated/query-data/sql/basic-query/#alias-queried-fields-and-tags), you can use the `AS` clause to define the alias, but it isn't necessary. +When aliasing columns in **InfluxQL**, use the `AS` clause and an +[identifier](/influxdb/cloud-dedicated/reference/influxql/#identifiers). When +[aliasing columns in **SQL**](/influxdb/cloud-dedicated/query-data/sql/basic-query/#alias-queried-fields-and-tags), +you can use the `AS` clause to define the alias, but it isn't necessary. {{% /note %}} diff --git a/content/influxdb/clustered/query-data/influxql/basic-query.md b/content/influxdb/clustered/query-data/influxql/basic-query.md index d4126cce1..db9d336a8 100644 --- a/content/influxdb/clustered/query-data/influxql/basic-query.md +++ b/content/influxdb/clustered/query-data/influxql/basic-query.md @@ -25,20 +25,29 @@ following clauses: {{< req type="key" >}} -- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to return from a - measurement or use the wildcard alias (`*`) to select all fields and tags - from a measurement. It requires at least one - [field key](/influxdb/clustered/reference/glossary/#field-key) or the wildcard alias (`*`). 
- For more information, see [Notable SELECT statement behaviors](/influxdb/clustered/reference/influxql/select/#notable-select-statement-behaviors). -- {{< req "\*">}} `FROM`: Specify the [measurement](/influxdb/clustered/reference/glossary/#measurement) to query from. -It requires one or more comma-delimited [measurement expressions](/influxdb/clustered/reference/influxql/select/#measurement_expression). +- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to return + from a [table](/influxdb/clustered/reference/glossary/#table) or use the + wildcard alias (`*`) to select all fields and tags from a table. It requires + at least one + [field key](/influxdb/clustered/reference/glossary/#field-key) or the + wildcard alias (`*`). For more information, see + [Notable SELECT statement behaviors](/influxdb/clustered/reference/influxql/select/#notable-select-statement-behaviors). +- {{< req "\*">}} `FROM`: Specify the + [table](/influxdb/clustered/reference/glossary/#table) to query from. + + It requires one or more comma-delimited + [measurement expressions](/influxdb/clustered/reference/influxql/select/#measurement_expression). + - `WHERE`: Filter data based on -[field values](/influxdb/clustered/reference/glossary/#field), -[tag values](/influxdb/clustered/reference/glossary/#tag), or -[timestamps](/influxdb/clustered/reference/glossary/#timestamp). Only return data that meets the specified conditions--for example, falls within - a time range, contains specific tag values, or contains a field value outside a specified range. + [field values](/influxdb/clustered/reference/glossary/#field), + [tag values](/influxdb/clustered/reference/glossary/#tag), or + [timestamps](/influxdb/clustered/reference/glossary/#timestamp). Only + return data that meets the specified conditions--for example, falls within a + time range, contains specific tag values, or contains a field value outside a + specified range. 
{{% influxdb/custom-timestamps %}} + ```sql SELECT temp, @@ -49,21 +58,28 @@ WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T20:00:00Z' ``` + {{% /influxdb/custom-timestamps %}} ## Result set -If at least one row satisfies the query, {{% product-name %}} returns row data in the query result set. -If a query uses a `GROUP BY` clause, the result set includes the following: +If at least one row satisfies the query, {{% product-name %}} returns row data +in the query result set. +If a query uses a `GROUP BY` clause, the result set +includes the following: - Columns listed in the query's `SELECT` clause - A `time` column that contains the timestamp for the record or the group -- An `iox::measurement` column that contains the record's measurement (table) name -- Columns listed in the query's `GROUP BY` clause; each row in the result set contains the values used for grouping +- An `iox::measurement` column that contains the record's + [table](/influxdb/clustered/reference/glossary/#table) name +- Columns listed in the query's `GROUP BY` clause; each row in the result set + contains the values used for grouping ### GROUP BY result columns -If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then groups are based on the [default time range](/influxdb/clustered/reference/influxql/group-by/#default-time-range). +If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then +groups are based on the +[default time range](/influxdb/clustered/reference/influxql/group-by/#default-time-range). ## Basic query examples @@ -75,9 +91,10 @@ If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then g - [Alias queried fields and tags](#alias-queried-fields-and-tags) {{% note %}} + #### Sample data -The following examples use the +The following examples use the [Get started home sensor data](/influxdb/clustered/reference/sample-data/#get-started-home-sensor-data). 
To run the example queries and return results, [write the sample data](/influxdb/clustered/reference/sample-data/#write-the-home-sensor-data-to-influxdb) @@ -89,12 +106,14 @@ to your {{% product-name %}} database before running the example queries. - Use the `SELECT` clause to specify what tags and fields to return. Specify at least one field key. To return all tags and fields, use the wildcard alias (`*`). -- Specify the measurement to query in the `FROM` clause. -- Specify time boundaries in the `WHERE` clause. - Include time-based predicates that compare the value of the `time` column to a timestamp. +- Specify the [table](/influxdb/clustered/reference/glossary/#table) to + query in the `FROM` clause. +- Specify time boundaries in the `WHERE` clause. Include time-based predicates + that compare the value of the `time` column to a timestamp. Use the `AND` logical operator to chain multiple predicates together. {{% influxdb/custom-timestamps %}} + ```sql SELECT * FROM home @@ -102,6 +121,7 @@ WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T12:00:00Z' ``` + {{% /influxdb/custom-timestamps %}} Query time boundaries can be relative or absolute. @@ -119,7 +139,8 @@ Use `now()` to return the timestamp for the current time (UTC). SELECT * FROM home WHERE time >= now() - 30d ``` -##### Query one day of data data from a week ago +##### Query one day of data from a week ago + ```sql SELECT * FROM home @@ -127,16 +148,18 @@ WHERE time >= now() - 7d AND time <= now() - 6d ``` + {{% /expand %}} {{% expand "Query with absolute time boundaries" %}} -To query data from absolute time boundaries, compare the value of the `time` column -to a timestamp literal. -Use the `AND` logical operator to chain together multiple predicates and define -both start and stop boundaries for the query. +To query data from absolute time boundaries, compare the value of the `time` +column to a timestamp literal. 
+Use the `AND` logical operator to chain together +multiple predicates and define both start and stop boundaries for the query. {{% influxdb/custom-timestamps %}} + ```sql SELECT * @@ -146,6 +169,7 @@ WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T20:00:00Z' ``` + {{% /influxdb/custom-timestamps %}} {{% /expand %}} @@ -155,8 +179,8 @@ WHERE To query data without time boundaries, do not include any time-based predicates in your `WHERE` clause. -If a time range is not defined in the `WHERE` clause, the default time range is -the Unix epoch (`1970-01-01T00:00:00Z`) to _now_. +If a time range is not defined in the `WHERE` clause, +the default time range is the Unix epoch (`1970-01-01T00:00:00Z`) to _now_. {{% warn %}} Querying data _without time bounds_ can return an unexpected amount of data. @@ -171,8 +195,8 @@ SELECT * FROM home To query specific fields, include them in the `SELECT` clause. If querying multiple fields or tags, comma-delimit each. -If a field or tag key includes special characters or spaces or is case-sensitive, -wrap the key in _double-quotes_. +If a field or tag key includes special characters or spaces or is +case-sensitive, wrap the key in _double-quotes_. ```sql SELECT time, room, temp, hum FROM home @@ -180,10 +204,12 @@ SELECT time, room, temp, hum FROM home ### Query fields based on tag values -- In the `SELECT` clause, include fields you want to query and tags you want to base conditions on. -- In the `WHERE` clause, include predicates that compare the tag identifier to a string literal. - Use [logical operators](/influxdb/clustered/reference/influxql/where/#logical-operators) to chain multiple predicates together and apply - multiple conditions. +- In the `SELECT` clause, include fields you want to query and tags you want to + base conditions on. +- In the `WHERE` clause, include predicates that compare the tag identifier to a + string literal. 
Use + [logical operators](/influxdb/clustered/reference/influxql/where/#logical-operators) + to chain multiple predicates together and apply multiple conditions. ```sql SELECT * FROM home WHERE room = 'Kitchen' @@ -192,9 +218,12 @@ SELECT * FROM home WHERE room = 'Kitchen' ### Query points based on field values - In the `SELECT` clause, include fields you want to query. -- In the `WHERE` clause, include predicates that compare the field identifier to a value or expression. - Use [logical operators](/influxdb/clustered/reference/influxql/where/#logical-operators) (`AND`, `OR`) to chain multiple predicates together - and apply multiple conditions. +- In the `WHERE` clause, include predicates that compare the field identifier to + a value or expression. + Use + [logical operators](/influxdb/clustered/reference/influxql/where/#logical-operators) + (`AND`, `OR`) to chain multiple predicates together and apply multiple + conditions. ```sql SELECT co, time FROM home WHERE co >= 10 OR co <= -10 @@ -203,15 +232,17 @@ SELECT co, time FROM home WHERE co >= 10 OR co <= -10 ### Alias queried fields and tags To alias or rename fields and tags that you query, use the `AS` clause. -After the tag, field, or expression you want to alias, pass `AS` followed by the alias name as an identifier (wrap in double quotes (`"`) if the alias includes spaces or special characters)--for example: +After the tag, field, or expression you want to alias, pass `AS` followed by the +alias name as an identifier (wrap in double quotes (`"`) if the alias includes +spaces or special characters)--for example: ```sql SELECT temp AS temperature, hum AS "humidity (%)" FROM home ``` {{% note %}} -When aliasing columns in **InfluxQL**, use the `AS` clause and an [identifier](/influxdb/clustered/reference/influxql/#identifiers). 
-When [aliasing columns in **SQL**](/influxdb/clustered/query-data/sql/basic-query/#alias-queried-fields-and-tags), you can use the `AS` clause to define the alias, but it isn't necessary. +When aliasing columns in **InfluxQL**, use the `AS` clause and an +[identifier](/influxdb/clustered/reference/influxql/#identifiers). When +[aliasing columns in **SQL**](/influxdb/clustered/query-data/sql/basic-query/#alias-queried-fields-and-tags), +you can use the `AS` clause to define the alias, but it isn't necessary. {{% /note %}} - - diff --git a/test/src/prepare-content.sh b/test/src/prepare-content.sh index a41d4b6d4..42ed5dfe9 100644 --- a/test/src/prepare-content.sh +++ b/test/src/prepare-content.sh @@ -52,6 +52,7 @@ function substitute_placeholders { /os.getenv("DATABASE_TOKEN")/! s/DATABASE_TOKEN/$INFLUX_TOKEN/g; /os.getenv("DATABASE_NAME")/! s/DATABASE_NAME/$INFLUX_DATABASE/g; s/--id DBRP_ID/--id $INFLUX_DBRP_ID/g; + s/example-db/$INFLUX_DATABASE/g; s/get-started/$INFLUX_DATABASE/g; /os.getenv("MANAGEMENT_TOKEN")/! s/MANAGEMENT_TOKEN/$MANAGEMENT_TOKEN/g; /os.getenv("ORG_ID")/! s/ORG_ID/$INFLUX_ORG/g; From 4e601710b1fc73559f970b2b4bdfbe9c6e935a78 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Jul 2024 13:23:57 -0500 Subject: [PATCH 17/96] chore(ci): Adjusts Unix timestamps (line protocol) and query time bounds for testing - Adjusts times for writing and querying data within a recent retention period. - Adjusts Unix timestamps (in line protocol samples) to yesterday's date. - Adjusts UTC time bounds (in queries) to today and yesterday. - Reverts mistakenly changed timestamps in clustered. 
--- .../cloud-dedicated/get-started/query.md | 48 ++++ .../influxdb/clustered/get-started/query.md | 78 +++++-- .../influxdb/clustered/get-started/write.md | 208 +++++++++--------- test/src/prepare-content.sh | 41 +++- 4 files changed, 253 insertions(+), 122 deletions(-) diff --git a/content/influxdb/cloud-dedicated/get-started/query.md b/content/influxdb/cloud-dedicated/get-started/query.md index e87a7001a..a3437de27 100644 --- a/content/influxdb/cloud-dedicated/get-started/query.md +++ b/content/influxdb/cloud-dedicated/get-started/query.md @@ -204,6 +204,44 @@ WHERE ``` {{% /influxdb/custom-timestamps %}} + + {{% note %}} Some examples in this getting started tutorial assume your InfluxDB credentials (**URL** and **token**) are provided by @@ -252,6 +290,16 @@ influxctl query \ {{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} +{{% note %}} +#### Query using stored credentials + +Optionally, you can configure `database` and `token` query credentials in your `influxctl` +[connection profile](/influxdb/clustered/reference/cli/influxctl/#create-a-configuration-file). + +The `--database` and `--token` command line flags override credentials in your +configuration file. +{{% /note %}} + {{% /tab-content %}} {{% tab-content %}} diff --git a/content/influxdb/clustered/get-started/query.md b/content/influxdb/clustered/get-started/query.md index 0f704a444..530c5551a 100644 --- a/content/influxdb/clustered/get-started/query.md +++ b/content/influxdb/clustered/get-started/query.md @@ -35,8 +35,10 @@ the simplicity of SQL. {{% note %}} The examples in this section of the tutorial query the -[**get-started** database](/influxdb/clustered/get-started/setup/#create-a-database) for data written in the -[Get started writing data](/influxdb/clustered/get-started/write/#write-line-protocol-to-influxdb) section. 
+[**get-started** database](/influxdb/clustered/get-started/setup/#create-a-database) +for data written in the +[Get started writing data](/influxdb/clustered/get-started/write/#write-line-protocol-to-influxdb) +section. {{% /note %}} ## Tools to execute queries @@ -202,6 +204,44 @@ WHERE ``` {{% /influxdb/custom-timestamps %}} + + {{% note %}} Some examples in this getting started tutorial assume your InfluxDB credentials (**URL** and **token**) are provided by @@ -233,21 +273,33 @@ Provide the following: {{% influxdb/custom-timestamps %}} {{% code-placeholders "get-started" %}} + ```sh influxctl query \ --database get-started \ --token $INFLUX_TOKEN \ "SELECT - * -FROM - home -WHERE - time >= '2022-01-01T08:00:00Z' - AND time <= '2022-01-01T20:00:00Z'" + * + FROM + home + WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T20:00:00Z'" ``` + {{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} +{{% note %}} +#### Query using stored credentials + +Optionally, you can configure `database` and `token` query credentials in your `influxctl` +[connection profile](/influxdb/clustered/reference/cli/influxctl/#create-a-configuration-file). + +The `--database` and `--token` command line flags override credentials in your +configuration file. +{{% /note %}} + {{% /tab-content %}} {{% tab-content %}} @@ -308,12 +360,12 @@ _If your project's virtual environment is already running, skip to step 3._ - ```sh - influx3 sql "SELECT * + ```sh + influx3 sql "SELECT * FROM home WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T20:00:00Z'" - ``` + ``` `influx3` displays query results in your terminal. @@ -606,7 +658,7 @@ _If your project's virtual environment is already running, skip to step 3._ 2. Defines a `Query()` function that does the following: - 1. Instantiates `influx.Client` with InfluxDB credentials. + 1. 
Instantiates `influx.Client` with the following parameters for InfluxDB credentials: - **`Host`**: your {{% product-name omit=" Clustered" %}} cluster URL - **`Database`**: the name of your {{% product-name %}} database @@ -637,7 +689,7 @@ _If your project's virtual environment is already running, skip to step 3._ ```sh - go mod tidy && go build && go run influxdb_go_client + go mod tidy && go run influxdb_go_client ``` The program executes the `main()` function that writes the data and prints the query results to the console. diff --git a/content/influxdb/clustered/get-started/write.md b/content/influxdb/clustered/get-started/write.md index fa3150f9b..18dd81f20 100644 --- a/content/influxdb/clustered/get-started/write.md +++ b/content/influxdb/clustered/get-started/write.md @@ -204,32 +204,32 @@ influxctl write \ --database get-started \ --token $INFLUX_TOKEN \ --precision s \ - 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719734400 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719734400 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719738000 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719738000 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719741600 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719741600 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719745200 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719745200 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719748800 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719748800 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719752400 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719752400 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719756000 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719756000 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719759600 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719759600 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719763200 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719763200 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719766800 
-home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719766800 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719770400 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719770400 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719774000 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719774000 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719777600 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719777600' + 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200' ``` {{% /code-placeholders %}} @@ -258,32 +258,32 @@ and then write it to {{< product-name 
>}}. ```sh cat <<- EOF > home.lp - home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719820800 - home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719820800 - home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719824400 - home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719824400 - home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719828000 - home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719828000 - home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719831600 - home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719831600 - home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719835200 - home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719835200 - home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719838800 - home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719838800 - home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719842400 - home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719842400 - home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719846000 - home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719846000 - home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719849600 - home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719849600 - home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719853200 - home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719853200 - home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719856800 - home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719856800 - home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719860400 - home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719860400 - home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719864000 - home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719864000 + home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 + home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 + home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 + home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 + home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 + home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 + home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 + home,room=Kitchen 
temp=22.4,hum=36.0,co=0i 1641034800 + home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 + home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 + home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 + home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 + home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 + home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 + home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 + home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 + home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 + home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 + home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 + home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 + home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 + home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 + home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 + home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 + home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 + home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 EOF ``` @@ -467,32 +467,32 @@ response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ --header "Content-type: text/plain; charset=utf-8" \ --header "Accept: application/json" \ --data-binary " -home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719734400 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719734400 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719738000 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719738000 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719741600 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719741600 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719745200 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719745200 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719748800 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719748800 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719752400 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719752400 
-home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719756000 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719756000 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719759600 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719759600 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719763200 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719763200 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719766800 -home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719766800 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719770400 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719770400 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719774000 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719774000 -home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1719777600 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719777600 +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room 
temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. @@ -578,32 +578,32 @@ response=$(curl --silent --write-out "%{response_code}:-%{errormsg}" \ --header "Content-Type: text/plain; charset=utf-8" \ --header "Accept: application/json" \ --data-binary " -home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1719734400 -home,room=Kitchen temp=21.0,hum=35.9,co=0i 1719734400 -home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1719738000 -home,room=Kitchen temp=23.0,hum=36.2,co=0i 1719738000 -home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1719741600 -home,room=Kitchen temp=22.7,hum=36.1,co=0i 1719741600 -home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1719745200 -home,room=Kitchen temp=22.4,hum=36.0,co=0i 1719745200 -home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1719748800 -home,room=Kitchen temp=22.5,hum=36.0,co=0i 1719748800 -home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1719752400 -home,room=Kitchen temp=22.8,hum=36.5,co=1i 1719752400 -home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1719756000 -home,room=Kitchen temp=22.8,hum=36.3,co=1i 1719756000 -home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1719759600 -home,room=Kitchen temp=22.7,hum=36.2,co=3i 1719759600 -home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1719763200 -home,room=Kitchen temp=22.4,hum=36.0,co=7i 1719763200 -home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1719766800 -home,room=Kitchen temp=22.7,hum=36.0,co=9i 1719766800 -home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1719770400 -home,room=Kitchen temp=23.3,hum=36.9,co=18i 1719770400 -home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1719774000 -home,room=Kitchen temp=23.1,hum=36.6,co=22i 1719774000 -home,room=Living\ Room 
temp=22.2,hum=36.4,co=17i 1719777600 -home,room=Kitchen temp=22.7,hum=36.5,co=26i 1719777600 +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 ") # Format the response code and error message output. 
diff --git a/test/src/prepare-content.sh b/test/src/prepare-content.sh index 42ed5dfe9..4da6fa345 100644 --- a/test/src/prepare-content.sh +++ b/test/src/prepare-content.sh @@ -5,6 +5,12 @@ TEST_CONTENT="/app/content" +# Pattern to match a 10-digit Unix timestamp +TIMESTAMP_PATTERN='[0-9]{10}' + +NOW=$(date -u +"%Y-%m-%dT%H:%M:%SZ") +YESTERDAY=$(date -u -d 'yesterday 00:00' '+%Y-%m-%dT%H:%M:%SZ') + function substitute_placeholders { for file in `find "$TEST_CONTENT" -type f \( -iname '*.md' \)`; do if [ -f "$file" ]; then @@ -12,9 +18,37 @@ function substitute_placeholders { # Replaces placeholder values with environment variable references. + # Date-specific replacements. + + grep -oE "$TIMESTAMP_PATTERN" "$file" | while read -r timestamp; do + # Replace Unix timestamps (for example, in line protocol sample data) with yesterday's date. + # Assuming the Unix timestamp is the whole line or a standalone word in the line + # Validate the extracted timestamp (optional) + if [[ $timestamp =~ ^1641[0-9]{6,12}$ ]]; then + specific_timestamp=$timestamp + + # Extract the time part + specific_time=$(date -u -d "@$specific_timestamp" '+%T') + + # Calculate 'yesterday' date but use 'specific_time' for the time part + yesterday_date=$(date -u -d 'yesterday' '+%Y-%m-%d') + yesterday_datetime="$yesterday_date"T"$specific_time"Z + + # Convert 'yesterday_datetime' to Unix timestamp + yesterday_timestamp=$(date -u -d "$yesterday_datetime" +%s) + + # Replace the extracted timestamp with `yesterday_timestamp` + sed -i "s|$specific_timestamp|$yesterday_timestamp|g;" $file + fi + done + + ## Adjust time bounds in queries to be the current time and yesterday. + sed -i "s|'2022-01-01T20:00:00Z'|'$NOW'|g; + s|'2022-01-01T08:00:00Z'|'$YESTERDAY'|g; + " $file + # Non-language-specific replacements. - sed -i 's|https:\/\/{{< influxdb/host >}}|$INFLUX_HOST|g; - ' $file + sed -i 's|https:\/\/{{< influxdb/host >}}|$INFLUX_HOST|g;' $file # Python-specific replacements. 
# Use f-strings to identify placeholders in Python while also keeping valid syntax if @@ -38,9 +72,6 @@ function substitute_placeholders { s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \ $file - sed -i 's|"influxctl database create --retention-period 1y get-started"|"influxctl database create --retention-period 1y $INFLUX_TMP_DATABASE"|g;' \ - $file - # Replace remaining placeholders with variables. # If the placeholder is inside of a Python os.getenv() function, don't replace it. # Note the specific use of double quotes for the os.getenv() arguments here. You'll need to use double quotes in your code samples for this to match. From a8a772264cb49a8ad35a122e40a2c2f107c08bdf Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Jul 2024 13:50:27 -0500 Subject: [PATCH 18/96] chore(ci): Skip tests for deleting databases. --- .../reference/cli/influxctl/database/delete.md | 9 +++++++++ .../clustered/reference/cli/influxctl/database/delete.md | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/delete.md b/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/delete.md index 98b8e7f4e..e93f454d0 100644 --- a/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/delete.md +++ b/content/influxdb/cloud-dedicated/reference/cli/influxctl/database/delete.md @@ -14,6 +14,9 @@ Cloud Dedicated cluster. ## Usage + + + ```sh influxctl database delete [command options] [--force] [...] 
``` @@ -50,12 +53,18 @@ _Also see [`influxctl` global flags](/influxdb/cloud-dedicated/reference/cli/inf ##### Delete a database named "mydb" + + + ```sh influxctl database delete mydb ``` ##### Delete multiple databases + + + ```sh influxctl database delete mydb1 mydb2 ``` diff --git a/content/influxdb/clustered/reference/cli/influxctl/database/delete.md b/content/influxdb/clustered/reference/cli/influxctl/database/delete.md index 66e1da395..3d69c355b 100644 --- a/content/influxdb/clustered/reference/cli/influxctl/database/delete.md +++ b/content/influxdb/clustered/reference/cli/influxctl/database/delete.md @@ -12,6 +12,9 @@ The `influxctl database delete` command deletes a database from an InfluxDB clus ## Usage + + + ```sh influxctl database delete [command options] [--force] [...] ``` @@ -48,12 +51,18 @@ _Also see [`influxctl` global flags](/influxdb/clustered/reference/cli/influxctl ##### Delete a database named "mydb" + + + ```sh influxctl database delete mydb ``` ##### Delete multiple databases + + + ```sh influxctl database delete mydb1 mydb2 ``` From 8b590068d0a25e8974ccca0be3f0dc682efd6dfa Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Jul 2024 13:54:07 -0500 Subject: [PATCH 19/96] fix(v3): Tips, typos, and formatting --- .../cloud-serverless/get-started/query.md | 58 +++++++++++-------- 1 file changed, 35 insertions(+), 23 deletions(-) diff --git a/content/influxdb/cloud-serverless/get-started/query.md b/content/influxdb/cloud-serverless/get-started/query.md index bbd5a56df..823cd453a 100644 --- a/content/influxdb/cloud-serverless/get-started/query.md +++ b/content/influxdb/cloud-serverless/get-started/query.md @@ -1,6 +1,6 @@ --- title: Get started querying data -seotitle: Query data | Get started with InfluxDB +seotitle: Query data | Get started with InfluxDB Cloud Serverless list_title: Query data description: > Get started querying data in InfluxDB by learning about SQL and InfluxQL, and @@ -402,10 +402,15 @@ _If your project's virtual 
environment is already running, skip to step 3._ {{< expand-wrapper >}} {{% expand "Important: If using **Windows**, specify the **Windows** certificate path" %}} - When instantiating the client, Python looks for SSL/TLS certificate authority (CA) certificates for verifying the server's authenticity. - If using a non-POSIX-compliant operating system (such as Windows), you need to specify a certificate bundle path that Python can access on your system. + When instantiating the client, Python looks for SSL/TLS certificate authority + (CA) certificates for verifying the server's authenticity. + If using a non-POSIX-compliant operating system (such as Windows), you need to + specify a certificate bundle path that Python can access on your system. - The following example shows how to use the [Python `certifi` package](https://certifiio.readthedocs.io/en/latest/) and client library options to provide a bundle of trusted certificates to the Python Flight client: + The following example shows how to use the + [Python `certifi` package](https://certifiio.readthedocs.io/en/latest/) and + client library options to provide a bundle of trusted certificates to the + Python Flight client: 1. In your terminal, install the Python `certifi` package. @@ -444,28 +449,30 @@ _If your project's virtual environment is already running, skip to step 3._ 2. Calls the `InfluxDBClient3()` constructor method with credentials to instantiate an InfluxDB `client` with the following credentials: - - **`host`**: {{% product-name %}} region hostname (URL without protocol or trailing slash) + - **`host`**: {{% product-name %}} region hostname + (without `https://` protocol or trailing slash) - **`database`**: the name of the [{{% product-name %}} bucket](/influxdb/cloud-serverless/admin/buckets/) to query - **`token`**: an [API token](/influxdb/cloud-serverless/admin/tokens/) with _read_ access to the specified bucket. 
- _Store this in a secret store or environment variable to avoid exposing the raw token string._ + _Store this in a secret store or environment variable to avoid exposing + the raw token string._ - 3. Defines the SQL query to execute and assigns it to a `query` variable. + 1. Defines the SQL query to execute and assigns it to a `query` variable. - 4. Calls the `client.query()` method with the SQL query. + 2. Calls the `client.query()` method with the SQL query. `query()` sends a Flight request to InfluxDB, queries the database (bucket), retrieves result data from the endpoint, and then returns a [`pyarrow.Table`](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table) assigned to the `table` variable. - 5. Calls the [`to_pandas()` method](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.to_pandas) + 3. Calls the [`to_pandas()` method](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.to_pandas) to convert the Arrow table to a [`pandas.DataFrame`](https://arrow.apache.org/docs/python/pandas.html). - 6. Calls the [`pandas.DataFrame.to_markdown()` method](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_markdown.html) + 4. Calls the [`pandas.DataFrame.to_markdown()` method](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_markdown.html) to convert the DataFrame to a markdown table. - 7. Calls the `print()` method to print the markdown table to stdout. + 5. Calls the `print()` method to print the markdown table to stdout. -6. In your terminal, enter the following command to run the program and query {{% product-name %}}: +1. 
In your terminal, enter the following command to run the program and query {{% product-name %}}: @@ -609,14 +616,18 @@ _If your project's virtual environment is already running, skip to step 3._ - **`Host`**: your {{% product-name %}} region URL - **`Database`**: The name of your {{% product-name %}} bucket - **`Token`**: an [API token](/influxdb/cloud-serverless/admin/tokens/) with read permission on the specified bucket. - _Store this in a secret store or environment variable to avoid exposing the raw token string._ + _Store this in a secret store or environment variable to avoid + exposing the raw token string._ 2. Defines a deferred function to close the client after execution. 3. Defines a string variable for the SQL query. - 4. Calls the `influxdb3.Client.Query(sql string)` method and passes the SQL string to query InfluxDB. - `Query(sql string)` method returns an `iterator` for data in the response stream. - 5. Iterates over rows, formats the timestamp as an [RFC3339 timestamp](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp), and prints the data in table format to stdout. + 4. Calls the `influxdb3.Client.Query(sql string)` method and passes the + SQL string to query InfluxDB. + The `Query(sql string)` method returns an `iterator` for data in the + response stream. + 5. Iterates over rows, formats the timestamp as an + [RFC3339 timestamp](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp),and prints the data in table format to stdout. 3. In your editor, open the `main.go` file you created in the [Write data section](/influxdb/cloud-serverless/get-started/write/?t=Go#write-line-protocol-to-influxdb) and insert code to call the `Query()` function--for example: @@ -630,12 +641,13 @@ _If your project's virtual environment is already running, skip to step 3._ } ``` -4. In your terminal, enter the following command to install the necessary packages, build the module, and run the program: +4. 
In your terminal, enter the following command to install the necessary + packages, build the module, and run the program: ```sh - go mod tidy && go build && go run influxdb_go_client + go mod tidy && go run influxdb_go_client ``` The program executes the `main()` function that writes the data and prints the query results to the console. @@ -719,8 +731,10 @@ _This tutorial assumes you installed Node.js and npm, and created an `influxdb_j with InfluxDB credentials. - **`host`**: your {{% product-name %}} region URL - - **`token`**: an [API token](/influxdb/cloud-serverless/admin/tokens/) with _read_ access to the specified bucket. - _Store this in a secret store or environment variable to avoid exposing the raw token string._ + - **`token`**: an [API token](/influxdb/cloud-serverless/admin/tokens/) + with _read_ permission on the bucket you want to query. + _Store this in a secret store or environment variable to avoid exposing + the raw token string._ 3. Defines a string variable (`sql`) for the SQL query. 4. Defines an object (`data`) with column names for keys and array values for storing row data. @@ -752,7 +766,7 @@ _This tutorial assumes you installed Node.js and npm, and created an `influxdb_j main(); ``` -9. In your terminal, execute `index.mjs` to write to and query {{% product-name %}}: +6. In your terminal, execute `index.mjs` to write to and query {{% product-name %}}: @@ -1023,8 +1037,6 @@ _This tutorial assumes using Maven version 3.9, Java version >= 15, and an `infl **Linux/MacOS** - - ```sh export MAVEN_OPTS="--add-opens=java.base/java.nio=ALL-UNNAMED" ``` From 838d615692428c651860f3d48cf6b09eaa34538e Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Jul 2024 16:58:43 -0500 Subject: [PATCH 20/96] chore(ci): Update migrate guide for testing. Monitor for URLs that the container tries to open in a browser and then open them in the host. 
--- .husky/pre-commit | 4 ++-- .lintstagedrc.mjs | 10 ++++++-- Dockerfile.pytest | 3 +-- .../migrate-1x-to-cloud-dedicated.md | 24 ++++++++++++++++--- test/src/monitor-container-urls.sh | 8 +++---- test/src/monitor-tests.sh | 23 ++++++++++++++---- test/src/pytest.ini | 8 ++++++- 7 files changed, 61 insertions(+), 19 deletions(-) diff --git a/.husky/pre-commit b/.husky/pre-commit index 9b223fbff..42e27d76a 100644 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -1,6 +1,6 @@ # If you're running tests in a container that doesn't support TTY (docker run -t), you can use monitor-tests.sh to open URLs in the host's browser. # Your test needs to redirect the URL to the test/urls.txt file--for example: # influxctl database update /dev/null > test/urls.txt -# ./test/src/monitor-tests.sh start +sh ./test/src/monitor-tests.sh start npx lint-staged --relative -# ./test/src/monitor-tests.sh kill +sh ./test/src/monitor-tests.sh stop diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index fd6794abc..2a36884cd 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -48,14 +48,20 @@ function pytestStagedContent(paths, productPath) { // Instead of the plugin, we could use a placeholder test that always or conditionally passes. // Whether tests pass or fail, the container is removed, // but the CONTENT container and associated volume will remain until the next run. - // Note: TTY is required for the container to open influxctl OAuth URLs in the host browser. + // Note: Run the container with TTY to open influxctl OAuth URLs in the host browser. + // Run pytest with: + // -s to make pytest output log info during the test (instead of after). + // --suppress-no-test-exit-code to suppress exit code 5 (no tests collected). + // --exitfirst to stop after the first failure. + // --codeblocks to test code blocks in markdown files. 
+ // Run `docker run --tty=true --label tag=influxdata-docs --label stage=test \ --name ${TEST} \ --env-file ${productPath}/.env.test \ --volumes-from ${CONTENT} \ --mount type=bind,src=./test/shared,dst=/shared \ --mount type=volume,source=test-tmp,target=/app/iot-starter \ - influxdata-docs/pytest --codeblocks --suppress-no-test-exit-code --exitfirst ${productPath}/`, + influxdata-docs/pytest -s --codeblocks --suppress-no-test-exit-code --exitfirst ${productPath}/`, ]; } diff --git a/Dockerfile.pytest b/Dockerfile.pytest index f7bb443c9..fd1ee1c23 100644 --- a/Dockerfile.pytest +++ b/Dockerfile.pytest @@ -26,10 +26,9 @@ ENV PYTHONUNBUFFERED=1 WORKDIR /app -# Create a mock xdg-open script` to prevent the test suite from attempting to open a browser (for example, during influxctl OAuth2 authentication), and instead execute the host-open script. +# Create a mock xdg-open script` to prevent the test suite from attempting to open a browser (for example, during influxctl OAuth2 authentication). RUN echo '#!/bin/bash' > /usr/local/bin/xdg-open \ && echo 'echo "$1" > /shared/urls.txt' >> /usr/local/bin/xdg-open \ - && echo 'echo "$1" >> /shared/host_open.log' >> /usr/local/bin/xdg-open \ && chmod +x /usr/local/bin/xdg-open # Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't diff --git a/content/influxdb/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated.md b/content/influxdb/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated.md index 3f6c1e49f..543e28eeb 100644 --- a/content/influxdb/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated.md +++ b/content/influxdb/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated.md @@ -60,7 +60,7 @@ The simplest way to do this is to directly modify the line protocol exported in For example, the following line protocol includes both a tag and field named `temp`. 
-``` +```text home,room=Kitchen,temp=F co=0i,hum=56.6,temp=71.0 1672531200000000000 ``` @@ -143,13 +143,13 @@ the [exported line protocol](#migrate-data-step-1) to group certain fields into unique measurements. For example: -``` +```text example-measurement field1=0,field2=0,field3=0,field4=0,field5=0,field6=0,field7=0,field8=0 1672531200000000000 ``` Would become: -``` +```text new-measurement-1 field1=0,field2=0,field3=0,field4=0 1672531200000000000 new-measurement-2 field5=0,field6=0,field7=0,field8=0 1672531200000000000 ``` @@ -209,6 +209,9 @@ databases. {{% /note %}} ##### Export all data in a database and retention policy to a file + + + ```sh influx_inspect export \ -lponly \ @@ -221,6 +224,8 @@ databases. {{< expand-wrapper >}} {{% expand "Export all data to a file" %}} + + ```sh influx_inspect export \ -lponly \ @@ -231,6 +236,8 @@ influx_inspect export \ {{% expand "Export all data to a compressed file" %}} + + ```sh influx_inspect export \ -lponly \ @@ -242,6 +249,8 @@ influx_inspect export \ {{% expand "Export data within time bounds to a file" %}} + + ```sh influx_inspect export \ -lponly \ @@ -254,6 +263,8 @@ influx_inspect export \ {{% expand "Export a database and all its retention policies to a file" %}} + + ```sh influx_inspect export \ -lponly \ @@ -265,6 +276,8 @@ influx_inspect export \ {{% expand "Export a specific database and retention policy to a file" %}} + + ```sh influx_inspect export \ -lponly \ @@ -277,6 +290,8 @@ influx_inspect export \ {{% expand "Export all data from _non-default_ `data` and `wal` directories" %}} + + ```sh influx_inspect export \ -lponly \ @@ -329,6 +344,9 @@ You would create the following InfluxDB {{< current-version >}} databases: (default is infinite) - Database name _(see [Database naming restrictions](#database-naming-restrictions))_ + + + ```sh influxctl database create --retention-period 30d ``` diff --git a/test/src/monitor-container-urls.sh b/test/src/monitor-container-urls.sh index d9113d1c1..1938c5727 
100755 --- a/test/src/monitor-container-urls.sh +++ b/test/src/monitor-container-urls.sh @@ -3,20 +3,20 @@ ## This script is meant to be run on the host and monitors a file for URLs written by a container. # The file to monitor for URLs written by the container. -FILE="./test/shared/urls.txt" +URL_FILE="./test/shared/urls.txt" # Define the URL pattern for OAuth2 authorization. OAUTH_PATTERN='https://auth\.influxdata\.com/activate\?user_code=[A-Z]{1,8}-[A-Z]{1,8}' # Loop indefinitely while true; do - if [ -f "$FILE" ]; then + if [ -f "$URL_FILE" ]; then # Extract an OAuth2 authorization URL from the file - URL=$(grep -Eo "$OAUTH_PATTERN" "$FILE") + URL=$(grep -Eo "$OAUTH_PATTERN" "$URL_FILE") if [ "$URL" ]; then # Open the URL in the default browser open "$URL" # Clear the file to indicate the URL has been handled - > "$FILE" + > "$URL_FILE" fi fi sleep 1 diff --git a/test/src/monitor-tests.sh b/test/src/monitor-tests.sh index f3aa56c5c..fac2fad2a 100755 --- a/test/src/monitor-tests.sh +++ b/test/src/monitor-tests.sh @@ -1,13 +1,26 @@ +pid_file=./test/monitor_urls_pid function start { - ./test/src/monitor-container-urls.sh & echo $! > ./test/monitor_urls_pid + ./test/src/monitor-container-urls.sh & echo $! >> "$pid_file" } -function kill_process { - PID=$(cat ./test/monitor_urls_pid) && kill -9 $PID && rm ./test/monitor_urls_pid +function kill_processes { + # Kill all processes in the monitor_urls_pid file + echo "Cleaning up monitor-container-urls processes..." + while read -r PID; do + kill $PID 2>/dev/null; ps -p $PID > /dev/null + if [ $? 
-ne 0 ]; then + sed -i '' "/$PID/d" "$pid_file" + echo "Successfully stopped monitor-container-urls process $PID" + else + # Leave it in the file to try stopping it again next time + # and output the error message + echo "Failed to stop monitor-container-urls process $PID" + fi + done < "$pid_file" } case "$1" in start) start ;; - kill) kill_process ;; - *) echo "Usage: $0 {start|kill}" ;; + stop) kill_processes ;; + *) echo "Usage: $0 {start|stop}" ;; esac \ No newline at end of file diff --git a/test/src/pytest.ini b/test/src/pytest.ini index 79e51cf9b..2a8564cca 100644 --- a/test/src/pytest.ini +++ b/test/src/pytest.ini @@ -8,4 +8,10 @@ python_classes = *Test # Collect all functions. python_functions = * -filterwarnings = ignore::pytest.PytestReturnNotNoneWarning \ No newline at end of file +filterwarnings = ignore::pytest.PytestReturnNotNoneWarning +# Log settings. +log_file = /shared/tests_run.log +log_file_date_format = %Y-%m-%d %H:%M:%S +log_file_format = %(asctime)s - %(name)s %(levelname)s %(message)s +# INFO level log messages for extracting authentication URLs output during tests. +log_file_level = INFO \ No newline at end of file From 91c3577c6fa4ebc397c11d30e8240e3d0219661f Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Jul 2024 17:11:37 -0500 Subject: [PATCH 21/96] chore(ci): Fix comment. --- .lintstagedrc.mjs | 1 - 1 file changed, 1 deletion(-) diff --git a/.lintstagedrc.mjs b/.lintstagedrc.mjs index 2a36884cd..13d88eb1d 100644 --- a/.lintstagedrc.mjs +++ b/.lintstagedrc.mjs @@ -48,7 +48,6 @@ function pytestStagedContent(paths, productPath) { // Instead of the plugin, we could use a placeholder test that always or conditionally passes. // Whether tests pass or fail, the container is removed, // but the CONTENT container and associated volume will remain until the next run. - // Note: Run the container with TTY to open influxctl OAuth URLs in the host browser. 
// Run pytest with: // -s to make pytest output log info during the test (instead of after). // --suppress-no-test-exit-code to suppress exit code 5 (no tests collected). From e061b5172628f945e1c6537434c2e8f1fbb1334d Mon Sep 17 00:00:00 2001 From: Jennifer Moore Date: Wed, 10 Jul 2024 12:03:19 -0500 Subject: [PATCH 22/96] clustered: add section for custom CA on egress (#5519) * clustered: add section for custom CA on egress * Apply suggestions from code review Address review comments Co-authored-by: Scott Anderson --------- Co-authored-by: Scott Anderson --- .../clustered/install/configure-cluster.md | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/content/influxdb/clustered/install/configure-cluster.md b/content/influxdb/clustered/install/configure-cluster.md index 5b8d0bbcd..b3dcf52d1 100644 --- a/content/influxdb/clustered/install/configure-cluster.md +++ b/content/influxdb/clustered/install/configure-cluster.md @@ -821,4 +821,57 @@ spec: {{% /code-placeholders %}} +### Provide a custom certificate authority bundle {metadata="Optional"} + +InfluxDB attempts to make TLS connections to the services it depends on; notably +the [Catalog](/influxdb/clustered/reference/internals/storage-engine/#catalog), +and the [Object store](/influxdb/clustered/reference/internals/storage-engine/#object-store). +InfluxDB validates the certificates for all of the connections it makes. + +**If you host these services yourself and you use a private or otherwise not +well-known certificate authority to issue certificates to these services**, +InfluxDB will not recognize the issuer and will be unable to validate the certificates. +To allow InfluxDB to validate these certificates, provide a PEM certificate +bundle containing your custom certificate authority chain. + +1. Use `kubectl` to create a config map containing your PEM bundle. + Your certificate authority administrator should provide you with a + PEM-formatted certificate bundle file.
+ + {{% note %}} +This PEM-formatted bundle file is *not* the certificate that InfluxDB uses to +host its own TLS endpoints. This bundle establishes a chain of trust for the +external services that InfluxDB depends on. + {{% /note %}} + + In the example below, `private_ca.pem` is the certificate bundle file. + + ```sh + kubectl --namespace influxdb create configmap custom-ca --from-file=ca.pem=/path/to/private_ca.pem + ``` + + {{% note %}} +It's possible to append multiple certificates into the same bundle. +This can help if you need to include intermediate certificates or explicitly +include leaf certificates. Leaf certificates should be included before any +intermediate certificates they depend on. The root certificate should +be last in the bundle. + {{% /note %}} + +2. Update your `AppInstance` resource in your `myinfluxdb.yml` to refer to your + certificate authority config map. Update the `.spec.package.spec.egress` + property to refer to that config map. For example: + + ```yml + spec: + package: + spec: + egress: + customCertificates: + valueFrom: + configMapKeyRef: + key: ca.pem + name: custom-ca + ``` + {{< page-nav prev="/influxdb/clustered/install/auth/" prevText="Set up authentication" next="/influxdb/clustered/install/deploy/" nextText="Deploy your cluster" >}} From 0deaa85aeb6346437ea4eaff7af1e5f7c7a1328c Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Wed, 10 Jul 2024 11:15:14 -0600 Subject: [PATCH 23/96] hotfix: minor edits to clustered config doc --- .../clustered/install/configure-cluster.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/content/influxdb/clustered/install/configure-cluster.md b/content/influxdb/clustered/install/configure-cluster.md index b3dcf52d1..443ac2f06 100644 --- a/content/influxdb/clustered/install/configure-cluster.md +++ b/content/influxdb/clustered/install/configure-cluster.md @@ -72,12 +72,14 @@ template) that contains key information, such as: ## Configure your cluster -1.
[Create a cluster configuration file](#create-a-cluster-configuration-file) -2. [Create a namespace for InfluxDB](#create-a-namespace-for-influxdb) -3. [Install kubecfg kubit operator](#install-kubecfg-kubit-operator) -4. [Configure access to the InfluxDB container registry](#configure-access-to-the-influxdb-container-registry) -5. [Set up cluster ingress](#set-up-cluster-ingress) -6. [Modify the configuration file to point to prerequisites](#modify-the-configuration-file-to-point-to-prerequisites) +1. [Create a cluster configuration file](#create-a-cluster-configuration-file) +2. [Create a namespace for InfluxDB](#create-a-namespace-for-influxdb) +3. [Install kubecfg kubit operator](#install-kubecfg-kubit-operator) +4. [Configure access to the InfluxDB container registry](#configure-access-to-the-influxdb-container-registry) +5. [Set up cluster ingress](#set-up-cluster-ingress) +6. [Modify the configuration file to point to prerequisites](#modify-the-configuration-file-to-point-to-prerequisites) +7. 
[Provide a custom certificate authority bundle](#provide-a-custom-certificate-authority-bundle) + (Optional) ### Create a cluster configuration file @@ -821,7 +823,7 @@ spec: {{% /code-placeholders %}} -### Provide a custom certificate authority bundle {metadata="Optional"} +### Provide a custom certificate authority bundle {note="Optional"} InfluxDB attempts to make TLS connections to the services it depends on; notably the [Catalog](/influxdb/clustered/reference/internals/storage-engine/#catalog), From 59d8958ebc38be7f437626a929548558771844f2 Mon Sep 17 00:00:00 2001 From: Jennifer Moore Date: Thu, 11 Jul 2024 18:03:38 -0500 Subject: [PATCH 24/96] docs(clustered): initial license onboarding guide (#5508) * docs(clustered): initial license onboarding guide * refactor: run `deno fmt` * fix(clustered): make room for documents in between existing ones * fix(clustered): introduce sensible deterministic order for reference documents * refactor: separate license installation from license reference * fix: markdown typo, confusing verbiage * Licensing content and structure updates (#5520) * WIP licensing content * changes to licensing content and structure * Update content/influxdb/clustered/admin/licensing.md Co-authored-by: Jennifer Moore * updated TOC link to license recovery section * Apply suggestions from code review Co-authored-by: Jason Stirnaman * updated license recovery content --------- Co-authored-by: Jennifer Moore Co-authored-by: Jason Stirnaman * ported edits to new feature branch --------- Co-authored-by: Scott Anderson Co-authored-by: wayne warren Co-authored-by: Scott Anderson Co-authored-by: Jason Stirnaman --- .../clustered/admin/databases/_index.md | 2 +- content/influxdb/clustered/admin/licensing.md | 169 ++++++++++++++++++ .../influxdb/clustered/admin/tables/_index.md | 2 +- .../influxdb/clustered/admin/tokens/_index.md | 2 +- content/influxdb/clustered/install/auth.md | 2 +- .../clustered/install/configure-cluster.md | 4 +- 
content/influxdb/clustered/install/deploy.md | 5 +- .../influxdb/clustered/install/licensing.md | 104 +++++++++++ .../clustered/install/prerequisites.md | 2 +- .../clustered/install/use-your-cluster.md | 2 +- .../report-query-performance-issues.md | 2 +- .../clustered/reference/api/_index.md | 2 +- .../clustered/reference/cli/_index.md | 2 +- .../reference/client-libraries/_index.md | 2 +- .../influxdb/clustered/reference/glossary.md | 2 +- .../clustered/reference/influxql/_index.md | 2 +- .../clustered/reference/internals/_index.md | 4 +- .../reference/release-notes/_index.md | 2 +- .../clustered/reference/sample-data.md | 2 +- .../clustered/reference/sql/_index.md | 2 +- .../clustered/reference/syntax/_index.md | 2 +- yarn.lock | 97 +++++----- 22 files changed, 347 insertions(+), 68 deletions(-) create mode 100644 content/influxdb/clustered/admin/licensing.md create mode 100644 content/influxdb/clustered/install/licensing.md diff --git a/content/influxdb/clustered/admin/databases/_index.md b/content/influxdb/clustered/admin/databases/_index.md index 3d2ecb010..1dbb4a19a 100644 --- a/content/influxdb/clustered/admin/databases/_index.md +++ b/content/influxdb/clustered/admin/databases/_index.md @@ -9,7 +9,7 @@ description: > menu: influxdb_clustered: parent: Administer InfluxDB Clustered -weight: 101 +weight: 102 influxdb/clustered/tags: [databases] --- diff --git a/content/influxdb/clustered/admin/licensing.md b/content/influxdb/clustered/admin/licensing.md new file mode 100644 index 000000000..0e0ede2a5 --- /dev/null +++ b/content/influxdb/clustered/admin/licensing.md @@ -0,0 +1,169 @@ +--- +title: Manage your InfluxDB Clustered license +description: > + Install and manage your InfluxDB Clustered license to authorize the use of + the InfluxDB Clustered software. 
+menu: + influxdb_clustered: + parent: Administer InfluxDB Clustered + name: Manage your license +weight: 101 +influxdb/clustered/tags: [licensing] +related: + - /influxdb/clustered/install/licensing/ + - /influxdb/clustered/admin/upgrade/ +--- + +Install and manage your InfluxDB Clustered license to authorize the use of +the InfluxDB Clustered software. + +- [Install your InfluxDB license](#install-your-influxdb-license) +- [Recover from a license misconfiguration](#recover-from-a-license-misconfiguration) +- [Renew your license](#renew-your-license) +- [License enforcement](#license-enforcement) + - [A valid license is required](#a-valid-license-is-required) + - [Periodic license checks](#periodic-license-checks) + - [License grace periods](#license-grace-periods) + - [License expiry logs](#license-expiry-logs) + - [Query brownout](#query-brownout) + +{{% note %}} +#### License enforcement is currently an opt-in feature + +In currently available versions of InfluxDB Clustered, license enforcement is an +opt-in feature that allows InfluxData to introduce license enforcement to +customers, and allows customers to deactivate the feature if issues arise. +In the future, all releases of InfluxDB Clustered will require customers to +configure an active license before they can use the product. + +To opt into license enforcement, include the `useLicensedBinaries` feature flag +in your `AppInstance` resource _([See the example below](#enable-feature-flag))_. +To deactivate license enforcement, remove the `useLicensedBinaries` feature flag. +{{% /note %}} + +## Install your InfluxDB license + +{{% note %}} +If setting up an InfluxDB Clustered deployment for the first time, first +[set up the prerequisites](/influxdb/clustered/install/licensing/) and +[configure your cluster](/influxdb/clustered/install/configure-cluster/). +After your InfluxDB namespace is created and prepared, you will be able to +install your license. +{{% /note %}} + +1. 
If you haven't already, + [request an InfluxDB Clustered license](https://influxdata.com/contact-sales). +2. InfluxData provides you with a `license.yml` file that encapsulates your + license token as a custom Kubernetes resource. +3. Use `kubectl` to apply and create the `License` resource in your InfluxDB + namespace: + + ```sh + kubectl apply --filename license.yml --namespace influxdb + ``` + +4. + Update your `AppInstance` resource to include the `useLicensedBinaries` feature flag. + Add the `useLicensedBinaries` entry to the `.spec.package.spec.featureFlags` + property--for example: + + ```yml + apiVersion: kubecfg.dev/v1alpha1 + kind: AppInstance + # ... + spec: + package: + spec: + featureFlags: + - useLicensedBinaries + ``` + +InfluxDB Clustered detects the `License` resource and extracts the credentials +into a secret required by InfluxDB Clustered Kubernetes pods. +Pods validate the license secret both at startup and periodically (roughly once +per hour) while running. + +## Recover from a license misconfiguration + +If you deploy a licensed release of InfluxDB Clustered with an invalid or +expired license, many of the pods in your cluster will crash on startup and will +likely enter a `CrashLoopBackoff` state without ever running or becoming healthy. +Because the license is stored in a volume-mounted Kubernetes secret, invalid +licenses affect both old and new pods. + +Once a valid `License` resource is applied, new pods will begin to start up normally. +Licenses are validated when the `License` resource is applied. If the license +is invalid when you attempt to apply it, the InfluxDB clustered license +controller will not add or update the required secret. + +## Renew your license + +In advance of your license expiration, your InfluxData sales representative will +contact you regarding license renewal. +You may also contact your sales representative at any time.
+ +--- + +## License enforcement + +InfluxDB Clustered authorizes use of InfluxDB software through licenses issued +by InfluxData. The following sections provide information about InfluxDB Clustered +license enforcement. + +### A valid license is required + +_When you include the `useLicensedBinaries` feature flag_, +Kubernetes pods running in your InfluxDB cluster must have a valid `License` +resource to run. Licenses are issued by InfluxData. If there is no `License` +resource installed in your cluster, one of two things may happen: + +- Pods may become stuck in a `ContainerCreating` state if the cluster has + never had a valid `License` resource installed. +- If an expired or invalid license is installed in the cluster, pods will become + stuck in a `CrashLoopBackoff` state. + Pod containers will attempt to start, detect the invalid license condition, + print an error message, and then exit with a non-zero exit code. + +### Periodic license checks + +During normal operation, pods in your InfluxDB cluster check for a valid license +once per hour. You may see messages in your pod logs related to this behavior. + +### License grace periods + +When InfluxData issues a license, it is configured with two expiry dates. +The first is the expiry date of the contractual license. The second is a hard +expiry of the license credentials, after which pods in your cluster will begin +crash-looping until a new, valid license is installed in the cluster. + +The period of time between the contractual license expiry and the hard license +expiry is considered the _grace period_. The standard grace period is 90 days, +but this may be negotiated as needed with your InfluxData sales representative. + +#### License expiry logs + +The following table outlines license expiry logging behavior to show when the log +messages begin, the level (`Warn` or `Error`), and the periodicity at which they +repeat. 
+ +| Starts at | Log level | Log periodicity | +| :-------------------- | :-------- | :-------------- | +| 1 month before expiry | Warn | 1 msg per hour | +| 1 week before expiry | Warn | 1 msg per 5 min | +| At expiry | Error | 1 msg per 5 min | + +#### Query brownout + +Starting one month after your contractual license expiry, the InfluxDB +[Querier](/influxdb/clustered/reference/internals/storage-engine/#querier) +begins "browning out" requests. Brownouts return +`FailedPrecondition` response codes to queries for a portion of every hour. + +| Starts at | Brownout coverage | +| :------------------- | :----------------- | +| 7 days after expiry | 5 minutes per hour | +| 1 month after expiry | 100% of queries | + +**Brownouts only occur after the license has contractually expired**. +Also, they **only impact query operations**--no other operations (writes, +compaction, garbage collection, etc) are affected. diff --git a/content/influxdb/clustered/admin/tables/_index.md b/content/influxdb/clustered/admin/tables/_index.md index f71a3bdfe..991704d89 100644 --- a/content/influxdb/clustered/admin/tables/_index.md +++ b/content/influxdb/clustered/admin/tables/_index.md @@ -8,7 +8,7 @@ description: > menu: influxdb_clustered: parent: Administer InfluxDB Clustered -weight: 101 +weight: 102 influxdb/clustered/tags: [tables] --- diff --git a/content/influxdb/clustered/admin/tokens/_index.md b/content/influxdb/clustered/admin/tokens/_index.md index 6234f3471..26823dfe9 100644 --- a/content/influxdb/clustered/admin/tokens/_index.md +++ b/content/influxdb/clustered/admin/tokens/_index.md @@ -8,7 +8,7 @@ description: > menu: influxdb_clustered: parent: Administer InfluxDB Clustered -weight: 101 +weight: 102 influxdb/clustered/tags: [tokens] --- diff --git a/content/influxdb/clustered/install/auth.md b/content/influxdb/clustered/install/auth.md index ca2a1ead4..d45f45720 100644 --- a/content/influxdb/clustered/install/auth.md +++ b/content/influxdb/clustered/install/auth.md @@ 
-6,7 +6,7 @@ menu: influxdb_clustered: name: Set up authentication parent: Install InfluxDB Clustered -weight: 102 +weight: 120 --- Administrative access to your InfluxDB cluster is managed through your identity diff --git a/content/influxdb/clustered/install/configure-cluster.md b/content/influxdb/clustered/install/configure-cluster.md index 443ac2f06..a3e8dd584 100644 --- a/content/influxdb/clustered/install/configure-cluster.md +++ b/content/influxdb/clustered/install/configure-cluster.md @@ -7,7 +7,7 @@ menu: influxdb_clustered: name: Configure your cluster parent: Install InfluxDB Clustered -weight: 103 +weight: 130 related: - /influxdb/clustered/admin/upgrade/ --- @@ -876,4 +876,4 @@ be last in the bundle. name: custom-ca ``` -{{< page-nav prev="/influxdb/clustered/install/auth/" prevText="Set up authentication" next="/influxdb/clustered/install/deploy/" nextText="Deploy your cluster" >}} +{{< page-nav prev="/influxdb/clustered/install/auth/" prevText="Set up authentication" next="/influxdb/clustered/install/licensing/" nextText="Install your license" >}} diff --git a/content/influxdb/clustered/install/deploy.md b/content/influxdb/clustered/install/deploy.md index 04bb84daa..cb6d95142 100644 --- a/content/influxdb/clustered/install/deploy.md +++ b/content/influxdb/clustered/install/deploy.md @@ -6,9 +6,10 @@ menu: influxdb_clustered: name: Deploy your cluster parent: Install InfluxDB Clustered -weight: 104 +weight: 140 related: - /influxdb/clustered/admin/upgrade/ + - /influxdb/clustered/install/licensing/ --- Use Kubernetes and related tools to deploy your InfluxDB cluster. 
@@ -172,4 +173,4 @@ influxdb iox-shared-querier-7f5998b9b-fpt62 4/4 Running 1 (6 influxdb kubit-apply-influxdb-g6qpx 0/1 Completed 0 8s ``` -{{< page-nav prev="/influxdb/clustered/install/configure-cluster/" prevText="Configure your cluster" next="/influxdb/clustered/install/use-your-cluster/" nextText="Use your cluster" >}} +{{< page-nav prev="/influxdb/clustered/install/licensing/" prevText="Install your license" next="/influxdb/clustered/install/use-your-cluster/" nextText="Use your cluster" >}} diff --git a/content/influxdb/clustered/install/licensing.md b/content/influxdb/clustered/install/licensing.md new file mode 100644 index 000000000..1d913fe4b --- /dev/null +++ b/content/influxdb/clustered/install/licensing.md @@ -0,0 +1,104 @@ +--- +title: Install your InfluxDB Clustered license +description: > + Install your InfluxDB Clustered license to authorize the use of the InfluxDB + Clustered software. +menu: + influxdb_clustered: + name: Install your License + parent: Install InfluxDB Clustered +weight: 135 +influxdb/clustered/tags: [licensing] +related: + - /influxdb/clustered/admin/licensing/ + - /influxdb/clustered/admin/upgrade/ +--- + +Install your InfluxDB Clustered license in your cluster to authorize the use +of the InfluxDB Clustered software. + +{{% note %}} +#### License enforcement is currently an opt-in feature + +In currently available versions of InfluxDB Clustered, license enforcement is an +opt-in feature that allows InfluxData to introduce license enforcement to +customers, and allows customers to deactivate the feature if issues arise. +In the future, all releases of InfluxDB Clustered will require customers to +configure an active license before they can use the product. + +To opt into license enforcement, include the `useLicensedBinaries` feature flag +in your `AppInstance` resource _([See the example below](#enable-feature-flag))_. +To deactivate license enforcement, remove the `useLicensedBinaries` feature flag. 
+{{% /note %}} + +## Install your InfluxDB license + +1. If you haven't already, + [request an InfluxDB Clustered license](https://influxdata.com/contact-sales). +2. InfluxData provides you with a `license.yml` file that encapsulates your + license token as a custom Kubernetes resource. +3. Use `kubectl` to apply and create the `License` resource in your InfluxDB + namespace: + + ```sh + kubectl apply --filename license.yml --namespace influxdb + ``` + +4. + Update your `AppInstance` resource to enable the `useLicensedBinaries` feature flag. + Add the `useLicensedBinaries` entry to the `.spec.package.spec.featureFlags` + property--for example: + + ```yml + apiVersion: kubecfg.dev/v1alpha1 + kind: AppInstance + # ... + spec: + package: + spec: + featureFlags: + - useLicensedBinaries + ``` + +InfluxDB Clustered detects the `License` resource and extracts the credentials +into a secret required by InfluxDB Clustered Kubernetes pods. +Pods validate the license secret both at startup and periodically (roughly once +per hour) while running. + +## Upgrade from a non-licensed release + +If you are currently using a non-licensed preview release of InfluxDB Clustered +and want to upgrade to a licensed release, do the following: + +1. [Install an InfluxDB license](#install-your-influxdb-license) +2. In your `myinfluxdb.yml`, update the package version defined in + `spec.package.image` to use a licensed release. + + {{% warn %}} +#### Upgrade to checkpoint releases first + +When upgrading InfluxDB Clustered, always upgrade to each +[checkpoint release](/influxdb/clustered/admin/upgrade/#checkpoint-releases) +first, before proceeding to newer versions. +Upgrading past a checkpoint release without first upgrading to it may result in +corrupt or lost data. + {{% /warn %}} + +{{% code-placeholders "PACKAGE_VERSION" %}} + +```yml +apiVersion: kubecfg.dev/v1alpha1 +kind: AppInstance +# ... +spec: + package: + # ... 
+ image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:PACKAGE_VERSION +``` + +{{% /code-placeholders %}} + +Replace {{% code-placeholder-key %}}`PACKAGE_VERSION`{{% /code-placeholder-key %}} with +the version number to upgrade to. + +{{< page-nav prev="/influxdb/clustered/install/configure-cluster/" prevText="Configure your cluster" next="/influxdb/clustered/install/deploy/" nextText="Deploy your cluster" >}} diff --git a/content/influxdb/clustered/install/prerequisites.md b/content/influxdb/clustered/install/prerequisites.md index 29e61cd43..5cd4b0a9c 100644 --- a/content/influxdb/clustered/install/prerequisites.md +++ b/content/influxdb/clustered/install/prerequisites.md @@ -8,7 +8,7 @@ menu: influxdb_clustered: name: Prerequisites parent: Install InfluxDB Clustered -weight: 101 +weight: 110 --- InfluxDB Clustered requires the following prerequisites: diff --git a/content/influxdb/clustered/install/use-your-cluster.md b/content/influxdb/clustered/install/use-your-cluster.md index 2c9643818..9e7debdbe 100644 --- a/content/influxdb/clustered/install/use-your-cluster.md +++ b/content/influxdb/clustered/install/use-your-cluster.md @@ -6,7 +6,7 @@ menu: influxdb_clustered: name: Use your cluster parent: Install InfluxDB Clustered -weight: 105 +weight: 150 --- Now that your InfluxDB cluster is deployed, you can use and test it. 
diff --git a/content/influxdb/clustered/query-data/troubleshoot-and-optimize/report-query-performance-issues.md b/content/influxdb/clustered/query-data/troubleshoot-and-optimize/report-query-performance-issues.md index 4d6ccf441..0bd923a5b 100644 --- a/content/influxdb/clustered/query-data/troubleshoot-and-optimize/report-query-performance-issues.md +++ b/content/influxdb/clustered/query-data/troubleshoot-and-optimize/report-query-performance-issues.md @@ -173,7 +173,7 @@ tar -czf "${DATETIME}-cluster-info.tar.gz" "${DATETIME}-cluster-info/" #### Query analysis -**Outputs (InfluxQl):** +**Outputs (InfluxQL):** - `explain.csv` - `explain-verbose.csv` diff --git a/content/influxdb/clustered/reference/api/_index.md b/content/influxdb/clustered/reference/api/_index.md index 1ba05d8be..49009b350 100644 --- a/content/influxdb/clustered/reference/api/_index.md +++ b/content/influxdb/clustered/reference/api/_index.md @@ -8,7 +8,7 @@ menu: influxdb_clustered: parent: Reference name: InfluxDB HTTP API -weight: 104 +weight: 127 influxdb/clustered/tags: [api] --- diff --git a/content/influxdb/clustered/reference/cli/_index.md b/content/influxdb/clustered/reference/cli/_index.md index 15de48f0a..c7fa9889d 100644 --- a/content/influxdb/clustered/reference/cli/_index.md +++ b/content/influxdb/clustered/reference/cli/_index.md @@ -9,7 +9,7 @@ menu: influxdb_clustered: parent: Reference name: CLIs -weight: 104 +weight: 120 --- InfluxDB provides command line tools designed to manage and work with your diff --git a/content/influxdb/clustered/reference/client-libraries/_index.md b/content/influxdb/clustered/reference/client-libraries/_index.md index e9fa12780..02e359917 100644 --- a/content/influxdb/clustered/reference/client-libraries/_index.md +++ b/content/influxdb/clustered/reference/client-libraries/_index.md @@ -4,7 +4,7 @@ description: > InfluxDB client libraries are language-specific tools that integrate with InfluxDB APIs. View the list of available client libraries. 
list_title: API client libraries -weight: 105 +weight: 125 aliases: - /influxdb/clustered/reference/api/client-libraries/ - /influxdb/clustered/tools/client-libraries/ diff --git a/content/influxdb/clustered/reference/glossary.md b/content/influxdb/clustered/reference/glossary.md index c067954a5..4489c1432 100644 --- a/content/influxdb/clustered/reference/glossary.md +++ b/content/influxdb/clustered/reference/glossary.md @@ -2,7 +2,7 @@ title: Glossary description: > Terms related to InfluxData products and platforms. -weight: 109 +weight: 180 menu: influxdb_clustered: parent: Reference diff --git a/content/influxdb/clustered/reference/influxql/_index.md b/content/influxdb/clustered/reference/influxql/_index.md index 25f21e329..d9214a790 100644 --- a/content/influxdb/clustered/reference/influxql/_index.md +++ b/content/influxdb/clustered/reference/influxql/_index.md @@ -7,7 +7,7 @@ menu: parent: Reference name: InfluxQL reference identifier: influxql-reference -weight: 102 +weight: 143 --- InfluxQL (Influx Query Language) is an SQL-like query language used to interact diff --git a/content/influxdb/clustered/reference/internals/_index.md b/content/influxdb/clustered/reference/internals/_index.md index da09bb191..eea2a7a7a 100644 --- a/content/influxdb/clustered/reference/internals/_index.md +++ b/content/influxdb/clustered/reference/internals/_index.md @@ -5,7 +5,7 @@ description: > menu: influxdb_clustered: parent: Reference -weight: 108 +weight: 130 --- -{{< children >}} \ No newline at end of file +{{< children >}} diff --git a/content/influxdb/clustered/reference/release-notes/_index.md b/content/influxdb/clustered/reference/release-notes/_index.md index 95b298419..3cb80d0e5 100644 --- a/content/influxdb/clustered/reference/release-notes/_index.md +++ b/content/influxdb/clustered/reference/release-notes/_index.md @@ -7,7 +7,7 @@ menu: influxdb_clustered: name: Release notes parent: Reference -weight: 101 +weight: 190 --- View release notes and updates for 
products and tools related to diff --git a/content/influxdb/clustered/reference/sample-data.md b/content/influxdb/clustered/reference/sample-data.md index a9b1b49f3..e1c022a40 100644 --- a/content/influxdb/clustered/reference/sample-data.md +++ b/content/influxdb/clustered/reference/sample-data.md @@ -8,7 +8,7 @@ menu: influxdb_clustered: name: Sample data parent: Reference -weight: 110 +weight: 182 --- Sample datasets are used throughout the {{< product-name >}} documentation to diff --git a/content/influxdb/clustered/reference/sql/_index.md b/content/influxdb/clustered/reference/sql/_index.md index 3fef4346f..713a95ac6 100644 --- a/content/influxdb/clustered/reference/sql/_index.md +++ b/content/influxdb/clustered/reference/sql/_index.md @@ -6,7 +6,7 @@ menu: influxdb_clustered: name: SQL reference parent: Reference -weight: 101 +weight: 141 related: - /influxdb/clustered/reference/internals/arrow-flightsql/ --- diff --git a/content/influxdb/clustered/reference/syntax/_index.md b/content/influxdb/clustered/reference/syntax/_index.md index a82679ba7..dc93900c7 100644 --- a/content/influxdb/clustered/reference/syntax/_index.md +++ b/content/influxdb/clustered/reference/syntax/_index.md @@ -3,7 +3,7 @@ title: Other InfluxDB syntaxes description: > InfluxDB uses a handful of languages and syntaxes to perform tasks such as writing, querying, and processing data. 
-weight: 105 +weight: 145 menu: influxdb_clustered: name: Other syntaxes diff --git a/yarn.lock b/yarn.lock index a266ccb7e..c9db0f1d8 100644 --- a/yarn.lock +++ b/yarn.lock @@ -45,7 +45,7 @@ "@nodelib/fs.stat" "2.0.5" run-parallel "^1.1.9" -"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": +"@nodelib/fs.stat@^2.0.2", "@nodelib/fs.stat@2.0.5": version "2.0.5" resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz" integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== @@ -236,7 +236,7 @@ braces@^3.0.3, braces@~3.0.2: dependencies: fill-range "^7.1.1" -browserslist@^4.23.0: +browserslist@^4.23.0, "browserslist@>= 4.21.0": version "4.23.0" resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz" integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ== @@ -401,16 +401,16 @@ color-convert@^2.0.1: dependencies: color-name "~1.1.4" -color-name@1.1.3: - version "1.1.3" - resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz" - integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== - color-name@~1.1.4: version "1.1.4" resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + colorette@^2.0.20: version "2.0.20" resolved "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz" @@ -543,7 +543,7 @@ dependency-graph@^0.11.0: discontinuous-range@1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/discontinuous-range/-/discontinuous-range-1.0.0.tgz#e38331f0844bba49b9a9cb71c771585aab1bc65a" + resolved 
"https://registry.npmjs.org/discontinuous-range/-/discontinuous-range-1.0.0.tgz" integrity sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ== duplexer2@~0.1.4: @@ -765,16 +765,16 @@ get-east-asian-width@^1.0.0: resolved "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.2.0.tgz" integrity sha512-2nk+7SIVb14QrgXFHcm84tD4bKQz0RxPuMT8Ag5KPOq7J5fEmAg0UbXdTOSHqNuHSU28k55qnceesxXRZGzKWA== -get-stdin@=8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-8.0.0.tgz#cbad6a73feb75f6eeb22ba9e01f89aa28aa97a53" - integrity sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg== - get-stdin@^9.0.0: version "9.0.0" resolved "https://registry.npmjs.org/get-stdin/-/get-stdin-9.0.0.tgz" integrity sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA== +get-stdin@=8.0.0: + version "8.0.0" + resolved "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz" + integrity sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg== + get-stream@^2.2.0: version "2.3.1" resolved "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz" @@ -926,7 +926,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@2, inherits@~2.0.0, inherits@~2.0.3: +inherits@~2.0.0, inherits@~2.0.3, inherits@2: version "2.0.4" resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== @@ -1061,7 +1061,7 @@ jsonfile@^6.0.1: jsox@^1.2.118: version "1.2.119" - resolved "https://registry.yarnpkg.com/jsox/-/jsox-1.2.119.tgz#decc12b6d3948d89460da6c5e144ee76c1d054ce" + resolved "https://registry.npmjs.org/jsox/-/jsox-1.2.119.tgz" integrity sha512-f37obwxWKKuylcaOzNlUlzfDvURSCpqTXs8yEivhvsp86D/DTIySxP4v5Qdlg24qCuzDSZ0mJr3krc/f7TZ/5A== keyv@^4.5.3: @@ -1240,12 +1240,17 @@ 
minipass@^3.0.0: dependencies: yallist "^4.0.0" +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": + version "7.1.2" + resolved "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz" + integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== + minipass@^5.0.0: version "5.0.0" resolved "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz" integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2: +minipass@^7.1.2: version "7.1.2" resolved "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz" integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== @@ -1258,6 +1263,11 @@ minizlib@^2.1.1: minipass "^3.0.0" yallist "^4.0.0" +mkdirp@^1.0.3: + version "1.0.4" + resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz" + integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== + "mkdirp@>=0.5 0": version "0.5.6" resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz" @@ -1265,14 +1275,9 @@ minizlib@^2.1.1: dependencies: minimist "^1.2.6" -mkdirp@^1.0.3: - version "1.0.4" - resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz" - integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== - moo@^0.5.0: version "0.5.2" - resolved "https://registry.yarnpkg.com/moo/-/moo-0.5.2.tgz#f9fe82473bc7c184b0d32e2215d3f6e67278733c" + resolved "https://registry.npmjs.org/moo/-/moo-0.5.2.tgz" integrity sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q== ms@2.1.2: @@ -1287,7 +1292,7 @@ nanoid@^3.3.7: nearley@^2.20.1: version "2.20.1" - resolved "https://registry.yarnpkg.com/nearley/-/nearley-2.20.1.tgz#246cd33eff0d012faf197ff6774d7ac78acdd474" + resolved "https://registry.npmjs.org/nearley/-/nearley-2.20.1.tgz" integrity 
sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ== dependencies: commander "^2.19.0" @@ -1302,7 +1307,7 @@ node-releases@^2.0.14: node-sql-parser@^4.12.0: version "4.18.0" - resolved "https://registry.yarnpkg.com/node-sql-parser/-/node-sql-parser-4.18.0.tgz#516b6e633c55c5abbba1ca588ab372db81ae9318" + resolved "https://registry.npmjs.org/node-sql-parser/-/node-sql-parser-4.18.0.tgz" integrity sha512-2YEOR5qlI1zUFbGMLKNfsrR5JUvFg9LxIRVE+xJe962pfVLH0rnItqLzv96XVs1Y1UIR8FxsXAuvX/lYAWZ2BQ== dependencies: big-integer "^1.6.48" @@ -1508,7 +1513,7 @@ postcss-value-parser@^4.2.0: resolved "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz" integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== -postcss@>=8.4.31: +postcss@^8.0.0, postcss@^8.1.0, postcss@>=8.0.9, postcss@>=8.4.31: version "8.4.38" resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz" integrity sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A== @@ -1519,7 +1524,7 @@ postcss@>=8.4.31: prettier-plugin-sql@^0.18.0: version "0.18.0" - resolved "https://registry.yarnpkg.com/prettier-plugin-sql/-/prettier-plugin-sql-0.18.0.tgz#b30fdccf5da714b22233828d1f3536980faef485" + resolved "https://registry.npmjs.org/prettier-plugin-sql/-/prettier-plugin-sql-0.18.0.tgz" integrity sha512-E7WXooLNtWyv79sYYHtQbfvXZ5B/OOR0ySBsB2evfrfvD4wJos1OKLBvVLC/a7+7YpG30bSUTgc2DEwz8ctPmQ== dependencies: jsox "^1.2.118" @@ -1527,7 +1532,7 @@ prettier-plugin-sql@^0.18.0: sql-formatter "^15.0.2" tslib "^2.6.2" -prettier@^3.2.5: +prettier@^3.0.3, prettier@^3.2.5: version "3.2.5" resolved "https://registry.npmjs.org/prettier/-/prettier-3.2.5.tgz" integrity sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A== @@ -1559,12 +1564,12 @@ quick-lru@^5.1.1: railroad-diagrams@^1.0.0: version "1.0.0" - resolved 
"https://registry.yarnpkg.com/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz#eb7e6267548ddedfb899c1b90e57374559cddb7e" + resolved "https://registry.npmjs.org/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz" integrity sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A== randexp@0.4.6: version "0.4.6" - resolved "https://registry.yarnpkg.com/randexp/-/randexp-0.4.6.tgz#e986ad5e5e31dae13ddd6f7b3019aa7c87f60ca3" + resolved "https://registry.npmjs.org/randexp/-/randexp-0.4.6.tgz" integrity sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ== dependencies: discontinuous-range "1.0.0" @@ -1643,7 +1648,7 @@ restore-cursor@^4.0.0: ret@~0.1.10: version "0.1.15" - resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" + resolved "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz" integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== reusify@^1.0.4: @@ -1656,13 +1661,6 @@ rfdc@^1.3.1: resolved "https://registry.npmjs.org/rfdc/-/rfdc-1.3.1.tgz" integrity sha512-r5a3l5HzYlIC68TpmYKlxWjmOP6wiPJ1vWv2HeLhNsRZMrCkxeqxiHlQ21oXmQ4F3SiryXBHhAD7JZqvOJjFmg== -rimraf@2: - version "2.7.1" - resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz" - integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== - dependencies: - glob "^7.1.3" - rimraf@^5.0.0: version "5.0.7" resolved "https://registry.npmjs.org/rimraf/-/rimraf-5.0.7.tgz" @@ -1670,6 +1668,13 @@ rimraf@^5.0.0: dependencies: glob "^10.3.7" +rimraf@2: + version "2.7.1" + resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz" + integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== + dependencies: + glob "^7.1.3" + run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz" @@ -1780,13 
+1785,20 @@ spdx-license-ids@^3.0.0: sql-formatter@^15.0.2: version "15.3.2" - resolved "https://registry.yarnpkg.com/sql-formatter/-/sql-formatter-15.3.2.tgz#696fb84c3ce6d368b0d16248a605aa0e7a41751e" + resolved "https://registry.npmjs.org/sql-formatter/-/sql-formatter-15.3.2.tgz" integrity sha512-pNxSMf5DtwhpZ8gUcOGCGZIWtCcyAUx9oLgAtlO4ag7DvlfnETL0BGqXaISc84pNrXvTWmt8Wal1FWKxdTsL3Q== dependencies: argparse "^2.0.1" get-stdin "=8.0.0" nearley "^2.20.1" +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + string-argv@~0.3.2: version "0.3.2" resolved "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz" @@ -1828,13 +1840,6 @@ string-width@^7.0.0: get-east-asian-width "^1.0.0" strip-ansi "^7.1.0" -string_decoder@~1.1.1: - version "1.1.1" - resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz" - integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== - dependencies: - safe-buffer "~5.1.0" - "strip-ansi-cjs@npm:strip-ansi@^6.0.1": version "6.0.1" resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz" @@ -1944,7 +1949,7 @@ to-regex-range@^5.0.1: tslib@^2.6.2: version "2.6.3" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0" + resolved "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz" integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ== type-fest@^1.0.1: From ab52a00624f1df1a23acc2334f51cfff960716cd Mon Sep 17 00:00:00 2001 From: Jack <56563911+jdockerty@users.noreply.github.com> Date: Fri, 12 Jul 2024 00:25:59 +0100 Subject: [PATCH 25/96] docs(clustered): clarify tuning garbage collector env vars (#5517) * docs: clarify clustered gc env 
tunables * docs: use suggested wording Co-authored-by: Scott Anderson --------- Co-authored-by: Scott Anderson --- .../clustered/write-data/best-practices/data-lifecycle.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/content/influxdb/clustered/write-data/best-practices/data-lifecycle.md b/content/influxdb/clustered/write-data/best-practices/data-lifecycle.md index 91420dbd5..55194db8f 100644 --- a/content/influxdb/clustered/write-data/best-practices/data-lifecycle.md +++ b/content/influxdb/clustered/write-data/best-practices/data-lifecycle.md @@ -45,6 +45,9 @@ Use the following environment variables to tune the garbage collector: - `INFLUXDB_IOX_GC_PARQUETFILE_CUTOFF`: how long to retain rows in the Catalog that reference Parquet files marked for deletion. The default is `30d`. +These values tune how aggressive the garbage collector can be. A shorter duration +value means that files can be removed at a faster pace. + {{% warn %}} To ensure there is a grace period before files and references are removed, the minimum garbage collector (GC) object store and Parquet file cutoff time is From ca8ab1e4a1ee81a54f490b5d05aff09126219703 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Mon, 15 Jul 2024 15:06:59 -0600 Subject: [PATCH 26/96] InfluxDB 2.7.7 (#5521) * InfluxDB 2.7.7 * updated latest patch for oss v2 * Update content/influxdb/v2/reference/release-notes/influxdb.md Co-authored-by: Jason Stirnaman --------- Co-authored-by: Jason Stirnaman --- .../v2/reference/release-notes/influxdb.md | 130 +++++++++++++++--- data/products.yml | 2 +- 2 files changed, 113 insertions(+), 19 deletions(-) diff --git a/content/influxdb/v2/reference/release-notes/influxdb.md b/content/influxdb/v2/reference/release-notes/influxdb.md index e4b94960f..7cb723105 100644 --- a/content/influxdb/v2/reference/release-notes/influxdb.md +++ b/content/influxdb/v2/reference/release-notes/influxdb.md @@ -8,8 +8,37 @@ menu: weight: 101 --- +## v2.7.7 {date="2024-07-12"} + +### Features + +- 
Disable `file://` URLs when [hardening is enabled](/influxdb/v2/reference/config-options/#hardening-enabled). + +### Bug Fixes + +- Ensure `TSMBatchKeyIterator` and `FileStore` close all `TSMReader`s. +- Return `MergeIterator.Close` errors. +- Ensure `GROUP BY` queries with an offset that crosses a date time boundary + (daylight savings time, British summer time, etc.) do not fail. +- Preserve time zone information in the Task Scheduler. +- Prevent the retention service from becoming unresponsive. + +### Maintenance + +- Update Flux to v0.195.1. +- Update `logrus` to 1.9.3. +- Update `golang.org/x/net` to v0.23.0. +- Update `protocol` buffers to v5.26.1. +- Update `go` toolchain to 1.21.10. + +--- + ## v2.7.6 {date="2024-04-12"} +### Features + +- Add optional [stricter password requirements](/influxdb/v2/reference/config-options/#strong-passwords). + ### Bug Fixes - Fix `panic index out of range` error for invalid series keys. @@ -19,16 +48,19 @@ weight: 101 - Return and respect cursor errors. - Update constant time code to make password strength calculations more constant. -### Features - -- Add optional [stricter password requirements](/influxdb/v2/reference/config-options/#strong-passwords). - ### Maintenance - Upgrade to Go to 1.21.9. +--- + ## v2.7.5 {date="2024-01-05"} +### Features + +- Add authenticating ID and user ID to request logging. +- Write detailed logs from endpoint detection and response (EDR) failures. + ### Bug Fixes - Only execute `init_config` on install. @@ -36,32 +68,35 @@ weight: 101 - Correctly return `4xx` errors instead of `5xx` errors. - Prevent retention service from creating orphaned shard files. -### Features - -- Add authenticating ID and user ID to request logging. -- Write detailed logs from endpoint detection and response (EDR) failures. - ### Maintenance - Emit build commands during tests. - Upgrade Flux to 0.194.5. 
+--- + ## v2.7.4 {date="2023-11-14"} _Internal changes only._ +--- + ## v2.7.3 {date="2023-10-17"} ### Maintenance - Upgrade Flux to 0.194.3. +--- + ## v2.7.1 {date="2023-04-28"} ### Bug Fixes - Update the InfluxDB UI to remove non-functional Data Explorer. +--- + ## v2.7.0 {date="2023-04-05"} ### Bug Fixes @@ -89,6 +124,8 @@ _Internal changes only._ - Bump `containerd` to 1.6.18. - Bump `github.com/opencontainers/runc` from 1.1.3 to 1.1.5. +--- + ## v2.6.1 {date="2022-12-29"} ### Bug Fixes @@ -96,6 +133,8 @@ _Internal changes only._ - Update user interface (UI) to fix dashboard page crash issue. - Fix `All Access` token creation issue. +--- + ## v2.6.0 {date="2022-12-15"} ### Features @@ -122,12 +161,16 @@ _Internal changes only._ - Upgrade to Go 1.18.9. - Upgrade Flux to v0.191.0. +--- + ## v2.5.1 {date="2022-11-09"} ### Bug fixes - Fix permissions issue in Debian and Red Hat package managers. +--- + ## v2.5.0 {date="2022-11-01"} ### Features @@ -154,6 +197,8 @@ _Internal changes only._ - Upgrade to [Go 1.18.7](https://go.dev/doc/go1.18) - Upgrade to [Rust 1.63.0](https://www.rust-lang.org/) +--- + ## v2.4.0 {date="2022-08-19"} ### Features @@ -196,6 +241,8 @@ _Internal changes only._ - Upgrade to [Go 1.18.4](https://go.dev/doc/go1.18). - Upgrade to [Flux 0.179.0](/flux/v0/release-notes/#v01790). +--- + ## v2.3.0 {date="2022-06-17"} This release includes the following [maintenance](#maintenance), [features](#features), [security updates](#security-updates) and [bug fixes](#bug-fixes). @@ -255,6 +302,8 @@ Several security issues were fixed in dependencies and the toolchain used to bui - Fix rare case where measurement cardinality reported less than zero. - Resolve panic on cleaning up failed iterators. +--- + ## v2.2.0 {date="2022-04-06"} This release includes the following new [features](#features) and several [bug fixes](#bug-fixes). 
@@ -397,9 +446,11 @@ the toolchain used to build InfluxDB, including: - `nats-port` and `nats-max-payload-bytes` flags have been deprecated. - NATS is no longer embedded in InfluxDB. Because InfluxDB no longer requires a port for NATS, port conflict issues are reduced. - Resolve the issue that prevented the browser from tracking the cookie `expiry` correctly, causing the cookie to expire automatically when restarting the browser or changing tabs. Now, the cookie is correctly preserved. -- Allow unlimited Flux HTTP calls. Previously, HTTP requests failed silently after 100MB of data transfer. +- Allow unlimited Flux HTTP calls. Previously, HTTP requests failed silently after 100 MB of data transfer. - Remove pagination limits on the `/telegrafs` API. Previously, pagination wasn't exposed to the API, so API requests were limited to the default 20 pages. +--- + ## v2.1.1 {date="2021-11-08"} {{% note %}} @@ -542,6 +593,8 @@ For more information about each plugin, see [Telegraf plugins](/telegraf/v1/plug - Do not allow shard creation to create overlapping shards. - Don't drop shard group durations when upgrading InfluxDB. +--- + ## v2.0.9 {date="2021-09-27"} This release includes several new [features](#features) and [bug fixes](#bug-fixes). @@ -632,7 +685,7 @@ This release includes the following bug fixes and updates: #### Task updates -- Updating an inactive task no longer schedules it. Thanks @raffs! +- Updating an inactive task no longer schedules it. - Preserve comments in Flux queries when saving task definitions. #### Version maintenance @@ -640,6 +693,8 @@ This release includes the following bug fixes and updates: - Fix `X-Influxdb-Build` and `X-Influxdb-Version` response header at `/ping`. - Upgrade `influxql` to latest version and fix predicate handling for `SHOW TAG VALUES` meta queries. 
+--- + ## v2.0.8 {date="2021-08-13"} {{% warn %}} #### Upcoming changes to influx CLI packaging @@ -679,7 +734,9 @@ To adopt the new, separate `influx` CLI early, download the latest release from - Prevent silently dropped writes when there are overlapping shards. - Invalid requests to `/api/v2` subroutes now return 404 instead of a list of links. - Flux meta queries for `_field` take fast path if `_measurement` is the only predicate. -- Copy names from mmapped memory before closing iterator. +- Copy names from `mmap` mapped memory before closing iterator. + +--- ## v2.0.7 {date="2021-06-04"} @@ -716,6 +773,8 @@ To adopt the new, separate `influx` CLI early, download the latest release from - Correctly validate when `query-concurrency` is `0` and `query-queue-size` is greater than `0`. +--- + ## v2.0.5 General Availability {date="2021-04-27"} {{% warn %}} @@ -797,7 +856,7 @@ The prefix used for Prometheus metrics from the query controller has changed fro - Fix use-after-free bug in series ID iterator. - Fix TSM and WAL segment size check to check against the local `SegmentSize`. - Fix TSM and WAL segment size computing to correctly calculate `totalOldDiskSize`. -- Update references to the documentation site site to use current URLs. +- Update references to the documentation site to use current URLs. - Fix data race in then TSM engine when inspecting tombstone statistics. - Fix data race in then TSM cache. - Deprecate misleading `retentionPeriodHrs` key in the onboarding API. @@ -815,6 +874,8 @@ The prefix used for Prometheus metrics from the query controller has changed fro - Reduce lock contention when adding new fields and measurements. - Escape dots in community templates hostname regular expression. +--- + ## v2.0.4 General Availability {date="2021-02-04"} ### Docker @@ -891,6 +952,8 @@ The startup process automatically generates replacement `tsi1` indexes for shard - Support creating users without initial passwords in `influx user create`. 
- Fix incorrect errors when passing `--bucket-id` to `influx write`. +--- + ## v2.0.3 General Availability {date="2020-12-14"} ### Breaking Changes @@ -935,6 +998,8 @@ This release also defines v2-specific path defaults and provides [helper scripts - Allow for 0 (infinite) values for `--retention` in `influx setup`. - Fix panic when using a `null` value as a record or array in a Flux query. +--- + ## v2.0.2 General Availability {date="2020-11-19"} ### Breaking changes @@ -969,6 +1034,8 @@ Previously, the database retention policy (DBRP) mapping API did not match the s - Allow self-signed certificates for scraper targets. - Bump version in `package.json` so it appears correctly. +--- + ## v2.0.1 General Availability {date="2020-11-10"} InfluxDB 2.0 general availability (GA) introduces the first **production-ready** open source version of InfluxDB 2.0. This release comprises all features and bug fixes included in prior alpha, beta, and release candidate versions. @@ -996,6 +1063,8 @@ Highlights include: If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/influxdb/v2/get-started/) and [InfluxDB key concepts](/influxdb/v2/reference/key-concepts/). +--- + ## v2.0.0 {date="2020-11-09"} ### Features @@ -1017,6 +1086,8 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i - Remove bucket and mapping auto-creation from `/write` 1.x compatibility API. - Fix misuse of `reflect.SliceHeader`. +--- + ## v2.0.0-rc.4 {date="2020-11-05"} ### Features @@ -1041,6 +1112,8 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i - Return an empty iterator instead of null in `tagValues`. - Fix the `/ready` response content type to return `application/json`. +--- + ## v2.0.0-rc.3 {date="2020-10-29"} ### Features @@ -1063,6 +1136,8 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i - Refactor to allow `newIndexSeriesCursor()` to accept an `influxql.Expr`. 
- Remove unreferenced packages. +--- + ## v2.0.0-rc.2 {date="2020-10-22"} ### Features @@ -1081,6 +1156,8 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i - Enable the new `AuthorizationService` from authorization package in the `launcher` package (`cmd\influxd\launcher`). - Update `config upgrade` to save the correct InfluxDB configuration filename. +--- + ## v2.0.0-rc.1 {date="2020-10-14"} ### Features @@ -1095,6 +1172,8 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i - Resolve issue to ensure the `influx` CLI successfully returns a single Telegraf configuration. - Ensure passwords are at least 8 characters in `influx setup`. +--- + ## v2.0.0-rc.0 {date="2020-09-29"} {{% warn %}} @@ -1161,6 +1240,8 @@ To simplify the migration for existing users of InfluxDB 1.x, this release inclu - Ensure the group annotation does not override the existing line part (measurement, field, tag, time) in a CSV group annotation. - Added `PATCH` to the list of allowed methods. +--- + ## v2.0.0-beta.16 {date="2020-08-06"} {{% warn %}} @@ -1190,6 +1271,8 @@ This release includes breaking changes: - Alerts page filter inputs now have tab indices for keyboard navigation. +--- + ## v2.0.0-beta.15 {date="2020-07-23"} ### Features @@ -1206,6 +1289,8 @@ This release includes breaking changes: - Single Stat cells render properly in Safari. - Limit variable querying when submitting queries to used variables. +--- + ## v2.0.0-beta.14 {date="2020-07-08"} ### Features @@ -1222,6 +1307,8 @@ This release includes breaking changes: - Fix issue where define query was unusable after importing a Check. - Update documentation links +--- + ## v2.0.0-beta.13 {date="2020-06-25"} ### Features @@ -1251,6 +1338,8 @@ This release includes breaking changes: - Validate `host-url` for `influx config create` and `influx config set` commands. - Fix `influx` CLI flags to accurately depict flags for all commands. 
+--- + ## v2.0.0-beta.12 {date="2020-06-12"} ### Features @@ -1281,6 +1370,8 @@ This release includes breaking changes: - Reduce the number of variables being hydrated when toggling variables. - Redesign dashboard cell loading indicator to be more obvious. +--- + ## v2.0.0-beta.11 {date="2020-05-27"} {{% warn %}} @@ -1303,6 +1394,8 @@ The beta 11 version was **not released**. Changes below are included in the beta - Resolve scrollbar issues to ensure datasets are visible and scrollable. - Check status now displays a warning if loading a large amount. +--- + ## v2.0.0-beta.10 {date="2020-05-07"} ### Features @@ -1368,7 +1461,7 @@ The beta 11 version was **not released**. Changes below are included in the beta - Make all `pkg` resources unique by `metadata.name` field. - Ensure Telegraf configuration tokens aren't retrievable after creation. New tokens can be created after Telegraf has been setup. - [Delete bucket by name](/influxdb/v2/admin/buckets/delete-bucket/#delete-a-bucket-by-name) using the `influx` CLI. -- Add helper module to write line protocol to specified url, org, and bucket. +- Add helper module to write line protocol to specified URL, organization, and bucket. - Add [`pkg stack`](/influxdb/v2/reference/cli/influx/stacks) for stateful package management. - Add `--no-tasks` flag to `influxd` to disable scheduling of tasks. - Add ability to output CLI output as JSON and hide table headers. @@ -1710,8 +1803,9 @@ The beta 11 version was **not released**. Changes below are included in the beta #### Known Issues The version of Flux included in Alpha 14 introduced `null` support. -Most issues related to the `null` implementation have been fixed, but one known issue remains – -The `map()` function panics if the first record processed has a `null` value. +Most issues related to the `null` implementation have been fixed, but one known +issue remains--the `map()` function panics if the first record processed has a +`null` value. 
--- @@ -1918,7 +2012,7 @@ This release includes a breaking change to the format in which Time-Structured M _**Existing local data will not be queryable after upgrading to this release.**_ Prior to installing this release, remove all storage-engine data from your local InfluxDB 2.x installation. -To remove only TSM and index data and preserve all other other InfluxDB 2.x data (organizations, buckets, settings, etc), +To remove only TSM and index data and preserve all other InfluxDB 2.x data (organizations, buckets, settings, etc), run the following command. ###### Linux and macOS @@ -2011,7 +2105,7 @@ Once completed, InfluxDB v2.0.0-alpha.5 can be started. - Change the wording for the plugin config form button to "Done." - Change the wording for the Collectors configure step button to "Create and Verify." - Standardize page loading spinner styles. -- Show checkbox on "Save As" button in data explorer. +- Show checkbox on "Save As" button in Data Explorer. - Make collectors plugins side bar visible in only the configure step. - Swap retention policies on Create bucket page. 
diff --git a/data/products.yml b/data/products.yml index be2ace52e..0179bf4e7 100644 --- a/data/products.yml +++ b/data/products.yml @@ -21,7 +21,7 @@ influxdb: - v1.7 latest: v2.7 latest_patches: - v2: 2.7.6 + v2: 2.7.7 v1: 1.8.10 latest_cli: v2: 2.7.5 From a1fb10eb1cc296b8b5a24f7275c360ea498cf319 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Wed, 17 Jul 2024 11:31:37 -0600 Subject: [PATCH 27/96] Remove limited access banner from Clustered (#5524) * remove limited access banner from clustered, closes #5523 * fix typo * update clustered install intro --- content/influxdb/clustered/install/_index.md | 8 ++++++++ layouts/index.html | 2 +- layouts/partials/article/limited-availability.html | 3 ++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/content/influxdb/clustered/install/_index.md b/content/influxdb/clustered/install/_index.md index 374b4d438..8fe1cac7f 100644 --- a/content/influxdb/clustered/install/_index.md +++ b/content/influxdb/clustered/install/_index.md @@ -10,6 +10,14 @@ InfluxDB Clustered is deployed and managed using Kubernetes. This multi-page guide walks through setting up prerequisites and configuring your InfluxDB cluster deployment. +InfluxDB Clustered is a commercial product offered by InfluxData, the creators +of InfluxDB. Please contact InfluxData Sales to obtain a license before +installing InfluxDB Clustered. + +Contact InfluxData Sales + +## Setup, configure, and deploy InfluxDB Clustered + {{< children type="ordered-list" >}} diff --git a/layouts/index.html b/layouts/index.html index 164705e15..7b15cec16 100644 --- a/layouts/index.html +++ b/layouts/index.html @@ -33,7 +33,7 @@

Self-managed

-
+

InfluxDB Clustered

Highly available InfluxDB 3.0 cluster built for high write and query workloads on your own infrastructure.