Revert "feat(ui): redesign load data page (#19246)" (#19336)

This reverts commit 67857f25ae.
alexpaxton authored 2020-08-14 17:27:56 -07:00, committed by GitHub
parent 67857f25ae
commit 0111f024d5
460 changed files with 2927 additions and 61689 deletions

@@ -2,7 +2,6 @@
### Features
1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use
1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command
### Bug Fixes

@@ -0,0 +1,68 @@
describe('Client Libraries', () => {
beforeEach(() => {
cy.flush()
cy.signin().then(({body}) => {
const {
org: {id},
} = body
cy.wrap(body.org).as('org')
cy.fixture('routes').then(({orgs}) => {
cy.visit(`${orgs}/${id}/load-data/client-libraries`)
})
})
})
it('open arduino popup', () => {
cy.getByTestID('client-libraries-cards--arduino').click()
cy.getByTestID('overlay--header').contains('Arduino Client Library')
})
it('open csharp popup', () => {
cy.getByTestID('client-libraries-cards--csharp').click()
cy.getByTestID('overlay--header').contains('C# Client Library')
})
it('open go popup', () => {
cy.getByTestID('client-libraries-cards--go').click()
cy.getByTestID('overlay--header').contains('GO Client Library')
})
it('open java popup', () => {
cy.getByTestID('client-libraries-cards--java').click()
cy.getByTestID('overlay--header').contains('Java Client Library')
})
it('open javascript popup', () => {
cy.getByTestID('client-libraries-cards--javascript-node').click()
cy.getByTestID('overlay--header').contains(
'JavaScript/Node.js Client Library'
)
})
it('open Kotlin popup', () => {
cy.getByTestID('client-libraries-cards--kotlin').click()
cy.getByTestID('overlay--header').contains('Kotlin Client Library')
})
it('open php popup', () => {
cy.getByTestID('client-libraries-cards--php').click()
cy.getByTestID('overlay--header').contains('PHP Client Library')
})
it('open python popup', () => {
cy.getByTestID('client-libraries-cards--python').click()
cy.getByTestID('overlay--header').contains('Python Client Library')
})
it('open ruby popup', () => {
cy.getByTestID('client-libraries-cards--ruby').click()
cy.getByTestID('overlay--header').contains('Ruby Client Library')
})
it('open scala popup', () => {
cy.getByTestID('client-libraries-cards--scala').click()
cy.getByTestID('overlay--header').contains('Scala Client Library')
})
})

@@ -64,12 +64,7 @@ describe('DataExplorer', () => {
cy.get('.view-lines').type(fluxCode)
})
cy.contains('Submit').click()
cy.get('.cf-tree-nav--toggle').click()
// Can't use the testID to select this nav item because Clockface is silly and uses the same testID twice
// Issue: https://github.com/influxdata/clockface/issues/539
cy.get('.cf-tree-nav--sub-item-label')
.contains('Buckets')
.click()
cy.getByTestID('nav-item-load-data').click()
cy.getByTestID('bucket--card--name _tasks').click()
cy.getByTestID('query-builder').should('exist')
})

@@ -1,50 +0,0 @@
describe('Load Data Sources', () => {
beforeEach(() => {
cy.flush()
cy.signin().then(({body}) => {
const {
org: {id},
} = body
cy.wrap(body.org).as('org')
cy.fixture('routes').then(({orgs}) => {
cy.visit(`${orgs}/${id}/load-data/sources`)
})
})
})
it('navigate to Client Library details view and render it with essentials', () => {
cy.getByTestID('write-data--section client-libraries').within(() => {
cy.getByTestID('square-grid').within(() => {
cy.getByTestIDSubStr('load-data-item')
.first()
.click()
})
})
const contentContainer = cy.getByTestID('load-data-details-content')
contentContainer.should('exist')
contentContainer.children().should('exist')
const logoElement = cy.getByTestID('load-data-details-thumb')
logoElement.should('exist')
})
it('navigate to Telegraf Plugin details view and render it with essentials', () => {
cy.getByTestID('write-data--section telegraf-plugins').within(() => {
cy.getByTestID('square-grid').within(() => {
cy.getByTestIDSubStr('load-data-item')
.first()
.click()
})
})
const contentContainer = cy.getByTestID('load-data-details-content')
contentContainer.should('exist')
contentContainer.children().should('exist')
const logoElement = cy.getByTestID('load-data-details-thumb')
logoElement.should('exist')
})
})

ui/global.d.ts (vendored): 18 changed lines
@@ -10,21 +10,11 @@ declare global {
interface Window {
monaco: MonacoType
}
declare module '*.png' {
const value: any
export = value
}
}
declare module '*.md' {
const value: string
export default value
}
declare module '*.svg' {
export const ReactComponent: SFC<SVGProps<SVGSVGElement>>
const src: string
export default src
}
declare module "*.png" {
const value: any;
export = value;
}
window.monaco = window.monaco || {}

ui/index.d.ts (vendored): 11 changed lines
@@ -1425,12 +1425,7 @@ declare module 'src/external/dygraph' {
// Allow typescript to recognize json files
declare module '*.json' {
const value: any
export default value
}
declare module '*.md' {
const value: string
export default value
declare module "*.json" {
const value: any;
export default value;
}

@@ -115,7 +115,7 @@
"mutation-observer": "^1.0.3",
"optimize-css-assets-webpack-plugin": "^5.0.3",
"prettier": "^1.19.1",
"raw-loader": "^4.0.1",
"raw-loader": "^4.0.0",
"sass": "^1.22.7",
"sass-loader": "^7.1.0",
"source-map-loader": "^0.2.4",

@@ -37,7 +37,7 @@ export default class ViewTokenOverlay extends PureComponent<Props> {
}
return (
<Overlay.Container maxWidth={830}>
<Overlay.Container>
<Overlay.Header
title={description}
onDismiss={this.handleDismiss}

@@ -1,5 +1,6 @@
// Libraries
import React, {Component} from 'react'
import {connect, ConnectedProps} from 'react-redux'
import {Switch, Route} from 'react-router-dom'
// Components
@@ -16,22 +17,28 @@ import {
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
import {getOrg} from 'src/organizations/selectors'
// Types
import {ResourceType} from 'src/types'
import {AppState, ResourceType} from 'src/types'
import {ORGS, ORG_ID, TOKENS} from 'src/shared/constants/routes'
const tokensPath = `/${ORGS}/${ORG_ID}/load-data/${TOKENS}/generate`
type ReduxProps = ConnectedProps<typeof connector>
type Props = ReduxProps
@ErrorHandling
class TokensIndex extends Component {
class TokensIndex extends Component<Props> {
public render() {
const {org} = this.props
return (
<>
<Page titleTag={pageTitleSuffixer(['Tokens', 'Load Data'])}>
<LoadDataHeader />
<LoadDataTabbedPage activeTab="tokens">
<LoadDataTabbedPage activeTab="tokens" orgID={org.id}>
<GetResources resources={[ResourceType.Authorizations]}>
<TokensTab />
</GetResources>
@@ -52,4 +59,8 @@ class TokensIndex extends Component {
}
}
export default TokensIndex
const mstp = (state: AppState) => ({org: getOrg(state)})
const connector = connect(mstp)
export default connector(TokensIndex)

@@ -1,5 +1,6 @@
// Libraries
import React, {Component} from 'react'
import {connect} from 'react-redux'
import {Switch, Route} from 'react-router-dom'
// Components
@@ -20,24 +21,31 @@ import {Page} from '@influxdata/clockface'
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
import {getOrg} from 'src/organizations/selectors'
// Constants
import {ORGS, ORG_ID, BUCKETS, BUCKET_ID} from 'src/shared/constants/routes'
// Types
import {ResourceType} from 'src/types'
import {AppState, Organization, ResourceType} from 'src/types'
interface StateProps {
org: Organization
}
const bucketsPath = `/${ORGS}/${ORG_ID}/load-data/${BUCKETS}/${BUCKET_ID}`
@ErrorHandling
class BucketsIndex extends Component {
class BucketsIndex extends Component<StateProps> {
public render() {
const {org} = this.props
return (
<>
<Page titleTag={pageTitleSuffixer(['Buckets', 'Load Data'])}>
<LimitChecker>
<LoadDataHeader />
<LoadDataTabbedPage activeTab="buckets">
<LoadDataTabbedPage activeTab="buckets" orgID={org.id}>
<GetResources
resources={[
ResourceType.Buckets,
@@ -80,4 +88,10 @@ class BucketsIndex extends Component {
}
}
export default BucketsIndex
const mstp = (state: AppState) => {
const org = getOrg(state)
return {org}
}
export default connect<StateProps, {}, {}>(mstp, null)(BucketsIndex)

@@ -0,0 +1,98 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientArduinoLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientArduinoOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
installingLibraryManagerCodeSnippet,
installingManualCodeSnippet,
initializeClientCodeSnippet,
writingDataPointCodeSnippet,
executeQueryCodeSnippet,
} = clientArduinoLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Install Library</h5>
<p>Library Manager</p>
<TemplatedCodeSnippet
template={installingLibraryManagerCodeSnippet}
label="Guide"
/>
<p>Manual Installation</p>
<TemplatedCodeSnippet
template={installingManualCodeSnippet}
label="Guide"
defaults={{
url: 'url',
}}
values={{
url,
}}
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Arduino Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<TemplatedCodeSnippet
template={writingDataPointCodeSnippet}
label="Arduino Code"
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Arduino Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
return {
org: getOrg(state).id,
}
}
export {ClientArduinoOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientArduinoOverlay)

@@ -0,0 +1,113 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientCSharpLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientCSharpOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
installingPackageManagerCodeSnippet,
installingPackageDotNetCLICodeSnippet,
packageReferenceCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
writingDataPointCodeSnippet: writingDataDataPointCodeSnippet,
writingDataLineProtocolCodeSnippet,
writingDataPocoCodeSnippet,
pocoClassCodeSnippet,
} = clientCSharpLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Install Package</h5>
<p>Package Manager</p>
<TemplatedCodeSnippet
template={installingPackageManagerCodeSnippet}
label="Code"
/>
<p>.NET CLI</p>
<TemplatedCodeSnippet
template={installingPackageDotNetCLICodeSnippet}
label="Code"
/>
<p>Package Reference</p>
<TemplatedCodeSnippet
template={packageReferenceCodeSnippet}
label="Code"
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="C# Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<p>Option 1: Use InfluxDB Line Protocol to write data</p>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="C# Code"
/>
<p>Option 2: Use a Data Point to write data</p>
<TemplatedCodeSnippet
template={writingDataDataPointCodeSnippet}
label="C# Code"
/>
<p>Option 3: Use POCO and corresponding Class to write data</p>
<TemplatedCodeSnippet
template={writingDataPocoCodeSnippet}
label="C# Code"
/>
<TemplatedCodeSnippet template={pocoClassCodeSnippet} label="C# Code" />
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="C# Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
return {
org: getOrg(state).id,
}
}
export {ClientCSharpOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientCSharpOverlay)

@@ -0,0 +1,86 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientGoLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientGoOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
initializeClientCodeSnippet,
writingDataLineProtocolCodeSnippet,
writingDataPointCodeSnippet,
executeQueryCodeSnippet,
} = clientGoLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Go Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<p>Option 1: Use InfluxDB Line Protocol to write data</p>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="Go Code"
/>
<p>Option 2: Use a Data Point to write data</p>
<TemplatedCodeSnippet
template={writingDataPointCodeSnippet}
label="Go Code"
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Go Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
return {
org: getOrg(state).id,
}
}
export {ClientGoOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientGoOverlay)

@@ -0,0 +1,84 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientJSLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientJSOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
initializeNPMCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
writingDataLineProtocolCodeSnippet,
} = clientJSLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Install via NPM</h5>
<TemplatedCodeSnippet template={initializeNPMCodeSnippet} label="Code" />
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="JavaScript Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="JavaScript Code"
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="JavaScript Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
const {id} = getOrg(state)
return {
org: id,
}
}
export {ClientJSOverlay}
export default connect<StateProps, {}, Props>(mstp)(ClientJSOverlay)

@@ -0,0 +1,104 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientJavaLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientJavaOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
buildWithMavenCodeSnippet,
buildWithGradleCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
writingDataLineProtocolCodeSnippet,
writingDataPointCodeSnippet,
writingDataPojoCodeSnippet,
pojoClassCodeSnippet,
} = clientJavaLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Add Dependency</h5>
<p>Build with Maven</p>
<TemplatedCodeSnippet template={buildWithMavenCodeSnippet} label="Code" />
<p>Build with Gradle</p>
<TemplatedCodeSnippet
template={buildWithGradleCodeSnippet}
label="Code"
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Java Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<p>Option 1: Use InfluxDB Line Protocol to write data</p>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="Java Code"
/>
<p>Option 2: Use a Data Point to write data</p>
<TemplatedCodeSnippet
template={writingDataPointCodeSnippet}
label="Java Code"
/>
<p>Option 3: Use POJO and corresponding class to write data</p>
<TemplatedCodeSnippet
template={writingDataPojoCodeSnippet}
label="Java Code"
/>
<TemplatedCodeSnippet template={pojoClassCodeSnippet} label="Java Code" />
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Java Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
return {
org: getOrg(state).id,
}
}
export {ClientJavaOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientJavaOverlay)

@@ -0,0 +1,83 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientKotlinLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientKotlinOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
buildWithMavenCodeSnippet,
buildWithGradleCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
} = clientKotlinLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Add Dependency</h5>
<p>Build with Maven</p>
<TemplatedCodeSnippet template={buildWithMavenCodeSnippet} label="Code" />
<p>Build with Gradle</p>
<TemplatedCodeSnippet
template={buildWithGradleCodeSnippet}
label="Code"
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Kotlin Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Kotlin Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
return {
org: getOrg(state).id,
}
}
export {ClientKotlinOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientKotlinOverlay)

@@ -0,0 +1,70 @@
// Libraries
import _ from 'lodash'
import React, {FunctionComponent, createElement} from 'react'
import {withRouter, RouteComponentProps, Link} from 'react-router-dom'
// Components
import {
Grid,
SelectableCard,
SquareGrid,
ComponentSize,
} from '@influxdata/clockface'
import CodeSnippet from 'src/shared/components/CodeSnippet'
// Mocks
import {clientLibraries} from 'src/clientLibraries/constants'
interface OwnProps {
orgID: string
}
type Props = OwnProps & RouteComponentProps<{orgID: string}>
const ClientLibraries: FunctionComponent<Props> = ({orgID, history}) => {
return (
<Grid>
<Grid.Row>
<Grid.Column>
<p>
Use the following URL when initializing each Client Library. The
Token can be generated on the
<Link to={`/orgs/${orgID}/load-data/tokens`}>&nbsp;Tokens tab</Link>
.
</p>
<CodeSnippet copyText={window.location.origin} label="Client URL" />
</Grid.Column>
</Grid.Row>
<Grid.Row>
<Grid.Column>
<SquareGrid cardSize="200px" gutter={ComponentSize.Small}>
{clientLibraries.map(cl => {
const handleClick = (): void => {
history.push(
`/orgs/${orgID}/load-data/client-libraries/${cl.id}`
)
}
return (
<SquareGrid.Card key={cl.id}>
<SelectableCard
id={cl.id}
formName="client-libraries-cards"
label={cl.name}
testID={`client-libraries-cards--${cl.id}`}
selected={false}
onClick={handleClick}
>
{createElement(cl.image)}
</SelectableCard>
</SquareGrid.Card>
)
})}
</SquareGrid>
</Grid.Column>
</Grid.Row>
</Grid>
)
}
export default withRouter(ClientLibraries)

@@ -0,0 +1,26 @@
/*
Client Library Overlay
*/
.client-library-overlay {
code {
font-weight: 600;
color: $c-potassium;
}
pre {
display: inline-block;
width: 100%;
padding: $ix-marg-c;
background-color: $g0-obsidian;
border-radius: $ix-radius;
margin-bottom: $ix-marg-c;
overflow-x: auto;
&:last-of-type {
margin-bottom: 0;
}
@include custom-scrollbar-round($g0-obsidian, $c-star);
}
}

@@ -0,0 +1,52 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
import {withRouter, RouteComponentProps} from 'react-router-dom'
// Components
import {Overlay} from '@influxdata/clockface'
// Types
import {AppState, Organization} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface OwnProps {
title: string
children: React.ReactNode
}
interface StateProps {
org: Organization
}
type Props = OwnProps & StateProps & RouteComponentProps<{orgID: string}>
const ClientLibraryOverlay: FunctionComponent<Props> = ({
title,
children,
history,
org,
}) => {
const onDismiss = () => {
history.push(`/orgs/${org.id}/load-data/client-libraries`)
}
return (
<Overlay visible={true}>
<Overlay.Container maxWidth={980}>
<Overlay.Header title={title} onDismiss={onDismiss} />
<Overlay.Body className="client-library-overlay">
{children}
</Overlay.Body>
</Overlay.Container>
</Overlay>
)
}
const mstp = (state: AppState) => ({
org: getOrg(state),
})
export default connect<StateProps>(mstp)(withRouter(ClientLibraryOverlay))

@@ -0,0 +1,100 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientPHPLibrary} from 'src/clientLibraries/constants'
// Selectors
import {getOrg} from 'src/organizations/selectors'
// Types
import {AppState} from 'src/types'
interface StateProps {
org: string
}
type Props = StateProps
const ClientPHPOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
initializeComposerCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
writingDataLineProtocolCodeSnippet,
writingDataPointCodeSnippet,
writingDataArrayCodeSnippet,
} = clientPHPLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Install via Composer</h5>
<TemplatedCodeSnippet
template={initializeComposerCodeSnippet}
label="Code"
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="PHP Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<p>Option 1: Use InfluxDB Line Protocol to write data</p>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="PHP Code"
/>
<p>Option 2: Use a Data Point to write data</p>
<TemplatedCodeSnippet
template={writingDataPointCodeSnippet}
label="PHP Code"
/>
<p>Option 3: Use an Array structure to write data</p>
<TemplatedCodeSnippet
template={writingDataArrayCodeSnippet}
label="PHP Code"
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="PHP Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
const {id} = getOrg(state)
return {
org: id,
}
}
export {ClientPHPOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientPHPOverlay)

@@ -0,0 +1,100 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientPythonLibrary} from 'src/clientLibraries/constants'
// Selectors
import {getOrg} from 'src/organizations/selectors'
// Types
import {AppState} from 'src/types'
interface StateProps {
org: string
}
type Props = StateProps
const ClientPythonOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
initializePackageCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
writingDataLineProtocolCodeSnippet,
writingDataPointCodeSnippet,
writingDataBatchCodeSnippet,
} = clientPythonLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Install Package</h5>
<TemplatedCodeSnippet
template={initializePackageCodeSnippet}
label="Code"
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Python Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<p>Option 1: Use InfluxDB Line Protocol to write data</p>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="Python Code"
/>
<p>Option 2: Use a Data Point to write data</p>
<TemplatedCodeSnippet
template={writingDataPointCodeSnippet}
label="Python Code"
/>
<p>Option 3: Use a Batch Sequence to write data</p>
<TemplatedCodeSnippet
template={writingDataBatchCodeSnippet}
label="Python Code"
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Python Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
const {id} = getOrg(state)
return {
org: id,
}
}
export {ClientPythonOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientPythonOverlay)

@@ -0,0 +1,103 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientRubyLibrary} from 'src/clientLibraries/constants'
// Selectors
import {getOrg} from 'src/organizations/selectors'
// Types
import {AppState} from 'src/types'
interface StateProps {
org: string
}
type Props = StateProps
const ClientRubyOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
initializeGemCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
writingDataLineProtocolCodeSnippet,
writingDataPointCodeSnippet,
writingDataHashCodeSnippet,
writingDataBatchCodeSnippet,
} = clientRubyLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Install the Gem</h5>
<TemplatedCodeSnippet template={initializeGemCodeSnippet} label="Code" />
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Ruby Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Write Data</h5>
<p>Option 1: Use InfluxDB Line Protocol to write data</p>
<TemplatedCodeSnippet
template={writingDataLineProtocolCodeSnippet}
label="Ruby Code"
/>
<p>Option 2: Use a Data Point to write data</p>
<TemplatedCodeSnippet
template={writingDataPointCodeSnippet}
label="Ruby Code"
/>
<p>Option 3: Use a Hash to write data</p>
<TemplatedCodeSnippet
template={writingDataHashCodeSnippet}
label="Ruby Code"
/>
<p>Option 4: Use a Batch Sequence to write data</p>
<TemplatedCodeSnippet
template={writingDataBatchCodeSnippet}
label="Ruby Code"
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Ruby Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
const {id} = getOrg(state)
return {
org: id,
}
}
export {ClientRubyOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientRubyOverlay)

@@ -0,0 +1,86 @@
// Libraries
import React, {FunctionComponent} from 'react'
import {connect} from 'react-redux'
// Components
import ClientLibraryOverlay from 'src/clientLibraries/components/ClientLibraryOverlay'
import TemplatedCodeSnippet from 'src/shared/components/TemplatedCodeSnippet'
// Constants
import {clientScalaLibrary} from 'src/clientLibraries/constants'
// Types
import {AppState} from 'src/types'
// Selectors
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: string
}
type Props = StateProps
const ClientScalaOverlay: FunctionComponent<Props> = props => {
const {
name,
url,
buildWithSBTCodeSnippet,
buildWithMavenCodeSnippet,
buildWithGradleCodeSnippet,
initializeClientCodeSnippet,
executeQueryCodeSnippet,
} = clientScalaLibrary
const {org} = props
const server = window.location.origin
return (
<ClientLibraryOverlay title={`${name} Client Library`}>
<p>
For more detailed and up to date information check out the{' '}
<a href={url} target="_blank">
GitHub Repository
</a>
</p>
<h5>Add Dependency</h5>
<p>Build with sbt</p>
<TemplatedCodeSnippet template={buildWithSBTCodeSnippet} label="Code" />
<p>Build with Maven</p>
<TemplatedCodeSnippet template={buildWithMavenCodeSnippet} label="Code" />
<p>Build with Gradle</p>
<TemplatedCodeSnippet
template={buildWithGradleCodeSnippet}
label="Code"
/>
<h5>Initialize the Client</h5>
<TemplatedCodeSnippet
template={initializeClientCodeSnippet}
label="Scala Code"
defaults={{
server: 'basepath',
token: 'token',
org: 'orgID',
bucket: 'bucketID',
}}
values={{
server,
org,
}}
/>
<h5>Execute a Flux query</h5>
<TemplatedCodeSnippet
template={executeQueryCodeSnippet}
label="Scala Code"
/>
</ClientLibraryOverlay>
)
}
const mstp = (state: AppState) => {
return {
org: getOrg(state).id,
}
}
export {ClientScalaOverlay}
export default connect<StateProps, {}, Props>(mstp, null)(ClientScalaOverlay)

@@ -0,0 +1,692 @@
import {SFC} from 'react'
import {
ArduinoLogo,
CSharpLogo,
GoLogo,
JavaLogo,
JSLogo,
KotlinLogo,
PHPLogo,
PythonLogo,
RubyLogo,
ScalaLogo,
} from '../graphics'
export interface ClientLibrary {
id: string
name: string
url: string
image: SFC
}
export const clientArduinoLibrary = {
id: 'arduino',
name: 'Arduino',
url: 'https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino',
image: ArduinoLogo,
installingLibraryManagerCodeSnippet: `1. Open the Arduino IDE, click the "Sketch" menu, and then Include Library > Manage Libraries.
2. Type 'influxdb' in the search box
3. Install the 'InfluxDBClient for Arduino' library`,
installingManualCodeSnippet: `1. cd <arduino-sketch-location>/library.
2. git clone <%= url %>
3. Restart the Arduino IDE`,
initializeClientCodeSnippet: `#if defined(ESP32)
#include <WiFiMulti.h>
WiFiMulti wifiMulti;
#define DEVICE "ESP32"
#elif defined(ESP8266)
#include <ESP8266WiFiMulti.h>
ESP8266WiFiMulti wifiMulti;
#define DEVICE "ESP8266"
#endif
#include <InfluxDbClient.h>
#include <InfluxDbCloud.h>
// WiFi AP SSID
#define WIFI_SSID "SSID"
// WiFi password
#define WIFI_PASSWORD "PASSWORD"
// InfluxDB v2 server url, e.g. https://eu-central-1-1.aws.cloud2.influxdata.com (Use: InfluxDB UI -> Load Data -> Client Libraries)
#define INFLUXDB_URL "<%= server %>"
// InfluxDB v2 server or cloud API authentication token (Use: InfluxDB UI -> Data -> Tokens -> <select token>)
#define INFLUXDB_TOKEN "<%= token %>"
// InfluxDB v2 organization id (Use: InfluxDB UI -> User -> About -> Common Ids )
#define INFLUXDB_ORG "<%= org %>"
// InfluxDB v2 bucket name (Use: InfluxDB UI -> Data -> Buckets)
#define INFLUXDB_BUCKET "<%= bucket %>"
// Set timezone string according to https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
// Examples:
// Pacific Time: "PST8PDT"
// Eastern: "EST5EDT"
// Japanese: "JST-9"
// Central Europe: "CET-1CEST,M3.5.0,M10.5.0/3"
#define TZ_INFO "CET-1CEST,M3.5.0,M10.5.0/3"
// InfluxDB client instance with preconfigured InfluxCloud certificate
InfluxDBClient client(INFLUXDB_URL, INFLUXDB_ORG, INFLUXDB_BUCKET, INFLUXDB_TOKEN, InfluxDbCloud2CACert);
// Data point
Point sensor("wifi_status");
void setup() {
Serial.begin(115200);
// Setup wifi
WiFi.mode(WIFI_STA);
wifiMulti.addAP(WIFI_SSID, WIFI_PASSWORD);
Serial.print("Connecting to wifi");
while (wifiMulti.run() != WL_CONNECTED) {
Serial.print(".");
delay(100);
}
Serial.println();
// Add tags
sensor.addTag("device", DEVICE);
sensor.addTag("SSID", WiFi.SSID());
// Accurate time is necessary for certificate validation and writing in batches
// For the fastest time sync find NTP servers in your area: https://www.pool.ntp.org/zone/
// Syncing progress and the time will be printed to Serial.
timeSync(TZ_INFO, "pool.ntp.org", "time.nist.gov");
// Check server connection
if (client.validateConnection()) {
Serial.print("Connected to InfluxDB: ");
Serial.println(client.getServerUrl());
} else {
Serial.print("InfluxDB connection failed: ");
Serial.println(client.getLastErrorMessage());
}
}`,
writingDataPointCodeSnippet: `void loop() {
// Clear fields for reusing the point. Tags will remain untouched
sensor.clearFields();
// Store measured value into point
// Report RSSI of currently connected network
sensor.addField("rssi", WiFi.RSSI());
// Print exactly what we are writing
Serial.print("Writing: ");
Serial.println(sensor.toLineProtocol());
// If no Wifi signal, try to reconnect it
if ((WiFi.RSSI() == 0) && (wifiMulti.run() != WL_CONNECTED)) {
Serial.println("Wifi connection lost");
}
// Write point
if (!client.writePoint(sensor)) {
Serial.print("InfluxDB write failed: ");
Serial.println(client.getLastErrorMessage());
}
//Wait 10s
Serial.println("Wait 10s");
delay(10000);
}`,
executeQueryCodeSnippet: `void loop() {
// Construct a Flux query
// Query will find the worst RSSI for the last hour for each connected WiFi network with this device
String query = "from(bucket: \\"" INFLUXDB_BUCKET "\\") |> range(start: -1h) |> filter(fn: (r) => r._measurement == \\"wifi_status\\" and r._field == \\"rssi\\"";
query += " and r.device == \\"" DEVICE "\\")";
query += "|> min()";
// Print output header
Serial.println("==== Query results ====");
// Print composed query
Serial.print("Querying with: ");
Serial.println(query);
// Send query to the server and get result
FluxQueryResult result = client.query(query);
// Iterate over rows. Even if there is just one row, next() must be called at least once.
while (result.next()) {
// Get converted value for flux result column 'SSID'
String ssid = result.getValueByName("SSID").getString();
Serial.print("SSID '");
Serial.print(ssid);
Serial.print("' with RSSI ");
// Get converted value for flux result column '_value' where there is RSSI value
long value = result.getValueByName("_value").getLong();
Serial.print(value);
// Get converted value for the _time column
FluxDateTime time = result.getValueByName("_time").getDateTime();
// Format date-time for printing
// Format string according to http://www.cplusplus.com/reference/ctime/strftime/
String timeStr = time.format("%F %T");
Serial.print(" at ");
Serial.print(timeStr);
Serial.println();
}
// Check if there was an error
if(result.getError() != "") {
Serial.print("Query result error: ");
Serial.println(result.getError());
}
// Close the result
result.close();
}
`,
}
export const clientCSharpLibrary = {
id: 'csharp',
name: 'C#',
url: 'https://github.com/influxdata/influxdb-client-csharp',
image: CSharpLogo,
installingPackageManagerCodeSnippet: `Install-Package InfluxDB.Client`,
installingPackageDotNetCLICodeSnippet: `dotnet add package InfluxDB.Client`,
packageReferenceCodeSnippet: `<PackageReference Include="InfluxDB.Client" />`,
initializeClientCodeSnippet: `using System;
using System.Threading.Tasks;
using InfluxDB.Client;
using InfluxDB.Client.Api.Domain;
using InfluxDB.Client.Core;
using InfluxDB.Client.Writes;
namespace Examples
{
public class Examples
{
public static async Task Main(string[] args)
{
// You can generate a Token from the "Tokens Tab" in the UI
const string token = "<%= token %>";
const string bucket = "<%= bucket %>";
const string org = "<%= org %>";
var client = InfluxDBClientFactory.Create("<%= server %>", token.ToCharArray());
}
}
}`,
executeQueryCodeSnippet: `var query = $"from(bucket: \\"{bucket}\\") |> range(start: -1h)";
var tables = await client.GetQueryApi().QueryAsync(query, org);`,
writingDataLineProtocolCodeSnippet: `const string data = "mem,host=host1 used_percent=23.43234543";
using (var writeApi = client.GetWriteApi())
{
writeApi.WriteRecord(bucket, org, WritePrecision.Ns, data);
}`,
writingDataPointCodeSnippet: `var point = PointData
.Measurement("mem")
.Tag("host", "host1")
.Field("used_percent", 23.43234543)
.Timestamp(DateTime.UtcNow, WritePrecision.Ns);
using (var writeApi = client.GetWriteApi())
{
writeApi.WritePoint(bucket, org, point);
}`,
writingDataPocoCodeSnippet: `var mem = new Mem { Host = "host1", UsedPercent = 23.43234543, Time = DateTime.UtcNow };
using (var writeApi = client.GetWriteApi())
{
writeApi.WriteMeasurement(bucket, org, WritePrecision.Ns, mem);
}`,
pocoClassCodeSnippet: `// POCO class
[Measurement("mem")]
private class Mem
{
[Column("host", IsTag = true)] public string Host { get; set; }
[Column("used_percent")] public double? UsedPercent { get; set; }
[Column(IsTimestamp = true)] public DateTime Time { get; set; }
}`,
}
export const clientGoLibrary = {
id: 'go',
name: 'GO',
url: 'https://github.com/influxdata/influxdb-client-go',
image: GoLogo,
initializeClientCodeSnippet: `package main
import (
"context"
"fmt"
"github.com/influxdata/influxdb-client-go"
"time"
)
func main() {
// You can generate a Token from the "Tokens Tab" in the UI
const token = "<%= token %>"
const bucket = "<%= bucket %>"
const org = "<%= org %>"
client := influxdb2.NewClient("<%= server %>", token)
// always close client at the end
defer client.Close()
}`,
writingDataPointCodeSnippet: `// create point using full params constructor
p := influxdb2.NewPoint("stat",
map[string]string{"unit": "temperature"},
map[string]interface{}{"avg": 24.5, "max": 45},
time.Now())
// write point asynchronously
writeApi.WritePoint(p)
// create point using fluent style
p = influxdb2.NewPointWithMeasurement("stat").
AddTag("unit", "temperature").
AddField("avg", 23.2).
AddField("max", 45).
SetTime(time.Now())
// write point asynchronously
writeApi.WritePoint(p)
// Flush writes
writeApi.Flush()`,
writingDataLineProtocolCodeSnippet: `// get non-blocking write client
writeApi := client.WriteApi(org, bucket)
// write line protocol
writeApi.WriteRecord(fmt.Sprintf("stat,unit=temperature avg=%f,max=%f", 23.5, 45.0))
writeApi.WriteRecord(fmt.Sprintf("stat,unit=temperature avg=%f,max=%f", 22.5, 45.0))
// Flush writes
writeApi.Flush()`,
executeQueryCodeSnippet: `query := fmt.Sprintf("from(bucket:\\"%v\\")|> range(start: -1h) |> filter(fn: (r) => r._measurement == \\"stat\\")", bucket)
// Get query client
queryApi := client.QueryApi(org)
// get QueryTableResult
result, err := queryApi.Query(context.Background(), query)
if err == nil {
// Iterate over query response
for result.Next() {
// Notice when group key has changed
if result.TableChanged() {
fmt.Printf("table: %s\\n", result.TableMetadata().String())
}
// Access data
fmt.Printf("value: %v\\n", result.Record().Value())
}
// check for an error
if result.Err() != nil {
fmt.Printf("query parsing error: %\\n", result.Err().Error())
}
} else {
panic(err)
}`,
}
export const clientJavaLibrary = {
id: 'java',
name: 'Java',
url: 'https://github.com/influxdata/influxdb-client-java',
image: JavaLogo,
buildWithMavenCodeSnippet: `<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-java</artifactId>
<version>1.8.0</version>
</dependency>`,
buildWithGradleCodeSnippet: `dependencies {
compile "com.influxdb:influxdb-client-java:1.8.0"
}`,
initializeClientCodeSnippet: `package example;
import java.time.Instant;
import java.util.List;
import com.influxdb.annotations.Column;
import com.influxdb.annotations.Measurement;
import com.influxdb.client.InfluxDBClient;
import com.influxdb.client.InfluxDBClientFactory;
import com.influxdb.client.WriteApi;
import com.influxdb.client.domain.WritePrecision;
import com.influxdb.client.write.Point;
import com.influxdb.query.FluxTable;
public class InfluxDB2Example {
public static void main(final String[] args) {
// You can generate a Token from the "Tokens Tab" in the UI
String token = "<%= token %>";
String bucket = "<%= bucket %>";
String org = "<%= org %>";
InfluxDBClient client = InfluxDBClientFactory.create("<%= server %>", token.toCharArray());
}
}`,
executeQueryCodeSnippet: `String query = String.format("from(bucket: \\"%s\\") |> range(start: -1h)", bucket);
List<FluxTable> tables = client.getQueryApi().query(query, org);`,
writingDataLineProtocolCodeSnippet: `String data = "mem,host=host1 used_percent=23.43234543";
try (WriteApi writeApi = client.getWriteApi()) {
writeApi.writeRecord(bucket, org, WritePrecision.NS, data);
}`,
writingDataPointCodeSnippet: `Point point = Point
.measurement("mem")
.addTag("host", "host1")
.addField("used_percent", 23.43234543)
.time(Instant.now(), WritePrecision.NS);
try (WriteApi writeApi = client.getWriteApi()) {
writeApi.writePoint(bucket, org, point);
}`,
writingDataPojoCodeSnippet: `Mem mem = new Mem();
mem.host = "host1";
mem.used_percent = 23.43234543;
mem.time = Instant.now();
try (WriteApi writeApi = client.getWriteApi()) {
writeApi.writeMeasurement(bucket, org, WritePrecision.NS, mem);
}`,
pojoClassCodeSnippet: `@Measurement(name = "mem")
public static class Mem {
@Column(tag = true)
String host;
@Column
Double used_percent;
@Column(timestamp = true)
Instant time;
}`,
}
export const clientJSLibrary = {
id: 'javascript-node',
name: 'JavaScript/Node.js',
url: 'https://github.com/influxdata/influxdb-client-js',
image: JSLogo,
initializeNPMCodeSnippet: `npm i @influxdata/influxdb-client`,
initializeClientCodeSnippet: `const {InfluxDB} = require('@influxdata/influxdb-client')
// You can generate a Token from the "Tokens Tab" in the UI
const token = '<%= token %>'
const org = '<%= org %>'
const bucket = '<%= bucket %>'
const client = new InfluxDB({url: '<%= server %>', token: token})`,
executeQueryCodeSnippet: `const queryApi = client.getQueryApi(org)
const query = \`from(bucket: \"\${bucket}\") |> range(start: -1h)\`
queryApi.queryRows(query, {
next(row, tableMeta) {
const o = tableMeta.toObject(row)
console.log(
\`\${o._time} \${o._measurement} in \'\${o.location}\' (\${o.example}): \${o._field}=\${o._value}\`
)
},
error(error) {
console.error(error)
console.log('\\nFinished ERROR')
},
complete() {
console.log('\\nFinished SUCCESS')
},
})`,
writingDataLineProtocolCodeSnippet: `const {Point} = require('@influxdata/influxdb-client')
const writeApi = client.getWriteApi(org, bucket)
writeApi.useDefaultTags({host: 'host1'})
const point = new Point('mem')
.floatField('used_percent', 23.43234543)
writeApi.writePoint(point)
writeApi
.close()
.then(() => {
console.log('FINISHED')
})
.catch(e => {
console.error(e)
console.log('\\nFinished ERROR')
})`,
}
export const clientPythonLibrary = {
id: 'python',
name: 'Python',
url: 'https://github.com/influxdata/influxdb-client-python',
image: PythonLogo,
initializePackageCodeSnippet: `pip install influxdb-client`,
initializeClientCodeSnippet: `from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
# You can generate a Token from the "Tokens Tab" in the UI
token = "<%= token %>"
org = "<%= org %>"
bucket = "<%= bucket %>"
client = InfluxDBClient(url="<%= server %>", token=token)`,
executeQueryCodeSnippet: `query = f'from(bucket: \\"{bucket}\\") |> range(start: -1h)'
tables = client.query_api().query(query, org=org)`,
writingDataLineProtocolCodeSnippet: `write_api = client.write_api(write_options=SYNCHRONOUS)
data = "mem,host=host1 used_percent=23.43234543"
write_api.write(bucket, org, data)`,
writingDataPointCodeSnippet: `point = Point("mem")\\
.tag("host", "host1")\\
.field("used_percent", 23.43234543)\\
.time(datetime.utcnow(), WritePrecision.NS)
write_api.write(bucket, org, point)`,
writingDataBatchCodeSnippet: `sequence = ["mem,host=host1 used_percent=23.43234543",
"mem,host=host1 available_percent=15.856523"]
write_api.write(bucket, org, sequence)`,
}
export const clientRubyLibrary = {
id: 'ruby',
name: 'Ruby',
url: 'https://github.com/influxdata/influxdb-client-ruby',
image: RubyLogo,
initializeGemCodeSnippet: `gem install influxdb-client`,
initializeClientCodeSnippet: `require 'influxdb-client'
# You can generate a Token from the "Tokens Tab" in the UI
token = '<%= token %>'
org = '<%= org %>'
bucket = '<%= bucket %>'
client = InfluxDB2::Client.new('<%= server %>', token,
precision: InfluxDB2::WritePrecision::NANOSECOND)`,
executeQueryCodeSnippet: `query = "from(bucket: \\"#{bucket}\\") |> range(start: -1h)"
tables = client.create_query_api.query(query: query, org: org)`,
writingDataLineProtocolCodeSnippet: `write_api = client.create_write_api
data = 'mem,host=host1 used_percent=23.43234543'
write_api.write(data: data, bucket: bucket, org: org)`,
writingDataPointCodeSnippet: `point = InfluxDB2::Point.new(name: 'mem')
.add_tag('host', 'host1')
.add_field('used_percent', 23.43234543)
.time(Time.now.utc, InfluxDB2::WritePrecision::NANOSECOND)
write_api.write(data: point, bucket: bucket, org: org)`,
writingDataHashCodeSnippet: `hash = {name: 'h2o',
tags: {host: 'aws', region: 'us'},
fields: {level: 5, saturation: '99%'},
time: Time.now.utc}
write_api.write(data: hash, bucket: bucket, org: org)`,
writingDataBatchCodeSnippet: `point = InfluxDB2::Point.new(name: 'mem')
.add_tag('host', 'host1')
.add_field('used_percent', 23.43234543)
.time(Time.now.utc, InfluxDB2::WritePrecision::NANOSECOND)
hash = {name: 'h2o',
tags: {host: 'aws', region: 'us'},
fields: {level: 5, saturation: '99%'},
time: Time.now.utc}
data = 'mem,host=host1 used_percent=23.23234543'
write_api.write(data: [point, hash, data], bucket: bucket, org: org)`,
}
export const clientPHPLibrary = {
id: 'php',
name: 'PHP',
url: 'https://github.com/influxdata/influxdb-client-php',
image: PHPLogo,
initializeComposerCodeSnippet: `composer require influxdata/influxdb-client-php`,
initializeClientCodeSnippet: `use InfluxDB2\\Client;
use InfluxDB2\\Model\\WritePrecision;
use InfluxDB2\\Point;
# You can generate a Token from the "Tokens Tab" in the UI
$token = '<%= token %>';
$org = '<%= org %>';
$bucket = '<%= bucket %>';
$client = new Client([
"url" => "<%= server %>",
"token" => $token,
]);`,
executeQueryCodeSnippet: `$query = "from(bucket: \\"{$bucket}\\") |> range(start: -1h)";
$tables = $client->createQueryApi()->query($query, $org);`,
writingDataLineProtocolCodeSnippet: `$writeApi = $client->createWriteApi();
$data = "mem,host=host1 used_percent=23.43234543";
$writeApi->write($data, WritePrecision::S, $bucket, $org);`,
writingDataPointCodeSnippet: `$point = Point::measurement('mem')
->addTag('host', 'host1')
->addField('used_percent', 23.43234543)
->time(microtime(true));
$writeApi->write($point, WritePrecision::S, $bucket, $org);`,
writingDataArrayCodeSnippet: `$dataArray = ['name' => 'cpu',
'tags' => ['host' => 'server_nl', 'region' => 'us'],
'fields' => ['internal' => 5, 'external' => 6],
'time' => microtime(true)];
$writeApi->write($dataArray, WritePrecision::S, $bucket, $org);`,
}
export const clientKotlinLibrary = {
id: 'kotlin',
name: 'Kotlin',
url:
'https://github.com/influxdata/influxdb-client-java/tree/master/client-kotlin',
image: KotlinLogo,
buildWithMavenCodeSnippet: `<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-kotlin</artifactId>
<version>1.8.0</version>
</dependency>`,
buildWithGradleCodeSnippet: `dependencies {
compile "com.influxdb:influxdb-client-kotlin:1.8.0"
}`,
initializeClientCodeSnippet: `package example
import com.influxdb.client.kotlin.InfluxDBClientKotlinFactory
import kotlinx.coroutines.channels.consumeEach
import kotlinx.coroutines.channels.filter
import kotlinx.coroutines.channels.take
import kotlinx.coroutines.runBlocking
fun main() = runBlocking {
// You can generate a Token from the "Tokens Tab" in the UI
val token = "<%= token %>"
val org = "<%= org %>"
val bucket = "<%= bucket %>"
val client = InfluxDBClientKotlinFactory.create("<%= server %>", token.toCharArray(), org)
}`,
executeQueryCodeSnippet: `val query = ("from(bucket: \\"$bucket\\")"
+ " |> range(start: -1d)"
+ " |> filter(fn: (r) => (r[\\"_measurement\\"] == \\"cpu\\" and r[\\"_field\\"] == \\"usage_system\\"))")
// Result is returned as a stream
val results = client.getQueryKotlinApi().query(query)
// Example of additional result stream processing on client side
results
// filter on client side using \`filter\` built-in operator
.filter { "cpu0" == it.getValueByKey("cpu") }
// take first 20 records
.take(20)
// print results
.consumeEach { println("Measurement: $\{it.measurement}, value: $\{it.value}") }
client.close()`,
}
export const clientScalaLibrary = {
id: 'scala',
name: 'Scala',
url:
'https://github.com/influxdata/influxdb-client-java/tree/master/client-scala',
image: ScalaLogo,
buildWithSBTCodeSnippet: `libraryDependencies += "com.influxdb" % "influxdb-client-scala" % "1.8.0"`,
buildWithMavenCodeSnippet: `<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-scala</artifactId>
<version>1.8.0</version>
</dependency>`,
buildWithGradleCodeSnippet: `dependencies {
compile "com.influxdb:influxdb-client-scala:1.8.0"
}`,
initializeClientCodeSnippet: `package example
import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import com.influxdb.client.scala.InfluxDBClientScalaFactory
import com.influxdb.query.FluxRecord
import scala.concurrent.Await
import scala.concurrent.duration.Duration
object InfluxDB2ScalaExample {
implicit val system: ActorSystem = ActorSystem("it-tests")
def main(args: Array[String]): Unit = {
// You can generate a Token from the "Tokens Tab" in the UI
val token = "<%= token %>"
val org = "<%= org %>"
val bucket = "<%= bucket %>"
val client = InfluxDBClientScalaFactory.create("<%= server %>", token.toCharArray, org)
}
}`,
executeQueryCodeSnippet: `val query = (s"""from(bucket: "$bucket")"""
+ " |> range(start: -1d)"
+ " |> filter(fn: (r) => (r[\\"_measurement\\"] == \\"cpu\\" and r[\\"_field\\"] == \\"usage_system\\"))")
// Result is returned as a stream
val results = client.getQueryScalaApi().query(query)
// Example of additional result stream processing on client side
val sink = results
// filter on client side using \`filter\` built-in operator
.filter(it => "cpu0" == it.getValueByKey("cpu"))
// take first 20 records
.take(20)
// print results
.runWith(Sink.foreach[FluxRecord](it => println(s"Measurement: $\{it.getMeasurement}, value: $\{it.getValue}")
))
// wait to finish
Await.result(sink, Duration.Inf)
client.close()
system.terminate()`,
}
export const clientLibraries: ClientLibrary[] = [
clientArduinoLibrary,
clientCSharpLibrary,
clientGoLibrary,
clientJavaLibrary,
clientJSLibrary,
clientKotlinLibrary,
clientPHPLibrary,
clientPythonLibrary,
clientRubyLibrary,
clientScalaLibrary,
]

@@ -0,0 +1,76 @@
// Libraries
import React, {PureComponent} from 'react'
import {connect} from 'react-redux'
import {Switch, Route} from 'react-router-dom'
// Components
import {ErrorHandling} from 'src/shared/decorators/errors'
import LoadDataTabbedPage from 'src/settings/components/LoadDataTabbedPage'
import LoadDataHeader from 'src/settings/components/LoadDataHeader'
import {Page} from '@influxdata/clockface'
import ClientLibraries from 'src/clientLibraries/components/ClientLibraries'
import ArduinoOverlay from 'src/clientLibraries/components/ClientArduinoOverlay'
import CSharpOverlay from 'src/clientLibraries/components/ClientCSharpOverlay'
import GoOverlay from 'src/clientLibraries/components/ClientGoOverlay'
import JavaOverlay from 'src/clientLibraries/components/ClientJavaOverlay'
import JSOverlay from 'src/clientLibraries/components/ClientJSOverlay'
import KotlinOverlay from 'src/clientLibraries/components/ClientKotlinOverlay'
import PHPOverlay from 'src/clientLibraries/components/ClientPHPOverlay'
import PythonOverlay from 'src/clientLibraries/components/ClientPythonOverlay'
import RubyOverlay from 'src/clientLibraries/components/ClientRubyOverlay'
import ScalaOverlay from 'src/clientLibraries/components/ClientScalaOverlay'
// Types
import {AppState, Organization} from 'src/types'
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
import {getOrg} from 'src/organizations/selectors'
interface StateProps {
org: Organization
}
import {ORGS, ORG_ID, CLIENT_LIBS} from 'src/shared/constants/routes'
const clientLibPath = `/${ORGS}/${ORG_ID}/load-data/${CLIENT_LIBS}`
@ErrorHandling
class ClientLibrariesPage extends PureComponent<StateProps> {
public render() {
const {org, children} = this.props
return (
<>
<Page titleTag={pageTitleSuffixer(['Client Libraries', 'Load Data'])}>
<LoadDataHeader />
<LoadDataTabbedPage activeTab="client-libraries" orgID={org.id}>
<ClientLibraries orgID={org.id} />
</LoadDataTabbedPage>
</Page>
<Switch>
<Route path={`${clientLibPath}/arduino`} component={ArduinoOverlay} />
<Route path={`${clientLibPath}/csharp`} component={CSharpOverlay} />
<Route path={`${clientLibPath}/go`} component={GoOverlay} />
<Route path={`${clientLibPath}/java`} component={JavaOverlay} />
<Route
path={`${clientLibPath}/javascript-node`}
component={JSOverlay}
/>
<Route path={`${clientLibPath}/kotlin`} component={KotlinOverlay} />
<Route path={`${clientLibPath}/php`} component={PHPOverlay} />
<Route path={`${clientLibPath}/python`} component={PythonOverlay} />
<Route path={`${clientLibPath}/ruby`} component={RubyOverlay} />
<Route path={`${clientLibPath}/scala`} component={ScalaOverlay} />
</Switch>
{children}
</>
)
}
}
const mstp = (state: AppState) => ({
org: getOrg(state),
})
export default connect<StateProps>(mstp)(ClientLibrariesPage)

@@ -0,0 +1,85 @@
// Libraries
import React, {SFC} from 'react'
const ArduinoLogo: SFC = () => {
return (
<svg
width="100"
height="100"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 720 490"
>
<style>
{
'.arduino{fill:none;stroke:#00979c;stroke-width:1}.text{fill:#00979c;stroke-width:1}'
}
</style>
<path
id="infinity"
className="arduino"
style={{
strokeWidth: 59,
}}
d="M174 30C78.937 22.427-.942 134.8 38.361 221.95c37.952 94.4 180.989 120.13 249.159 44.42 65.88-64.55 91.33-163.57 171.52-214.629 82.03-55.96 214.21-3.125 229.75 96.429 15.62 95.33-87.06 188.19-180.27 159.93-68.71-15.53-118.15-74.15-146.28-135.77-34.52-58.97-79.65-121.804-150.05-138.385C199.68 31.108 186.82 29.987 174 30z"
/>
<path
id="minus"
className="arduino"
d="M118 165h120"
style={{
strokeWidth: 25,
}}
/>
<path
id="plus"
className="arduino"
d="M486 165h100m-51-50v100"
style={{
strokeWidth: 32,
}}
/>
<path
id="A"
className="arduino text"
d="M84 485l-6.4-23H37l-6.6 22.963H8.683l34.445-116.329h30.617l34.445 116.33zm-26.434-94.968l-15.31 54.115h30.44z"
/>
<path
id="R"
className="arduino text"
d="M180 485c-6.16-13.64-11.68-27.59-18.29-41.01-3.83-5.76-11.35-8.02-17.98-7.09-1.05 3.95-.25 8.16-.48 12.23V485h-21.9V368.67c16.45.38 33.02-.97 49.38 1.19 10.7 1.73 21.86 7.97 25.01 19.01 3.95 11.8 1.99 27.03-8.6 34.79-4.16 3.15-9.14 5.16-14.28 5.94 6.54 2.6 10.52 8.65 13.33 14.78 6.18 11.9 11.47 24.3 17.49 36.24 2.9 2.89 1.15 5.26-2.34 4.38H180zm-5.42-82.07c.81-7.4-5.22-14.46-12.58-15.15-6.16-1.34-12.5-.73-18.75-.87v32.85c9.21-.1 19.95 1.01 27.15-5.96 2.82-2.84 4.26-6.9 4.18-10.87z"
/>
<path
id="D"
className="arduino text"
d="M307 425c0 16.34-3.65 34.15-16.06 45.74-11.93 11.24-29.05 14.33-44.91 13.9h-26.88V368.31c13.64.1 27.28-.22 40.92.21 13.54.73 27.94 5.34 36.34 16.62 8.57 11.29 10.54 26.07 10.59 39.86zm-23.5 1.43c-.19-11.38-.97-24.42-9.68-32.82-7.29-6.93-17.82-7.17-27.29-6.97h-5.48v79.66c10 0 20.87.79 29.42-5.4 10.97-7.59 12.99-22.14 13.03-34.47z"
/>
<path
id="U"
className="arduino text"
d="M407 442c.32 14.07-4.38 29.63-16.62 37.76-14.07 8.91-32.18 8.82-47.57 3.64-11.57-4.55-19.3-16.37-19.86-28.66-1.53-13.95-.27-28-.68-42v-44.25h21.89c.2 27.67-.43 55.35.39 83 .75 7.75 6.1 15.87 14.37 16.72 8.38 1.89 19.29.1 23.24-8.51 4.66-10.13 2.5-21.52 2.98-32.29v-58.92h21.9c0 24.49.2 49.09 0 73.51z"
/>
<path
id="I"
className="arduino text"
d="M428 386v-17.89h73.518V386h-25.634v80.371h25.634v18.068H428v-18.068h25.633V386z"
/>
<path
id="N"
className="arduino text"
d="M583 485c-12.51-28.13-25.44-56.08-37.56-84.38V485H525.5V368.67h26.88c11.83 26.48 24.41 52.64 35.67 79.36.66 2.94 2.27 4.52 1.72.61v-79.97h19.93V485H583z"
/>
<path
id="O"
className="arduino text"
d="M716 426c.1 17.33-3.54 36.21-16.34 48.83-13 12.7-33.67 14.75-50.21 8.52-13.94-5.33-22.03-19.45-24.87-33.46-3.05-15.52-3.18-31.81.62-47.2 3.61-14.3 12.89-28.04 27.23-33.19 13.4-4.85 29.21-4.31 41.72 2.81 13.59 8.11 19.33 24.2 21.03 39.17.58 4.81.82 9.67.82 14.52zm-23.14 1.07c-.25-11.45-.6-23.99-7.42-33.72-6.14-8.04-18.48-9.19-27.03-4.6-9.49 5.83-11.73 17.93-12.57 28.18-.63 13.14-.81 27.06 4.89 39.23 3.41 7.57 11.95 11.53 20 10.74 8.31 0 15.54-6.02 18.24-13.68 3.27-8.28 3.81-17.35 3.89-26.15z"
/>
<path
id="TM"
className="arduino text"
d="M676.41 13.375v2.344h5.15v13.562h2.66V15.719h5.19v-2.344h-13zm14.78 0v15.906h2.43V15.719l4.29 13.562h2.4c1.37-4.447 2.73-8.896 4.1-13.343v13.343h2.4V13.375H703c-1.3 4.241-2.61 8.478-3.91 12.719-1.33-4.241-2.67-8.478-4-12.719h-3.9z"
/>
</svg>
)
}
/* eslint-enable */
export default ArduinoLogo

View File

@ -0,0 +1,98 @@
// Libraries
import React, {SFC} from 'react'
const CSharpLogo: SFC = () => {
return (
<svg
width="100%"
height="100%"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 300 300"
>
<style>
{`
.csharp0{fill:none;}
.csharp1{fill:#9A4993;}
.csharp2{fill:#6A1577;}
.csharp3{fill:#813084;}
.csharp4{fill:#FFFFFF;}
`}
</style>
<rect className="csharp0" width="300" height="300" />
<g>
<path
id="XMLID_7_"
className="csharp1"
d="M230,113c0-3-0.6-5.7-2-8c-1.3-2.3-3.2-4.1-5.8-5.6c-21.3-12.3-42.6-24.5-63.9-36.8
c-5.7-3.3-11.3-3.2-17,0.2c-8.5,5-50.9,29.3-63.6,36.6c-5.2,3-7.7,7.6-7.7,13.6c0,24.7,0,49.4,0,74.1c0,3,0.6,5.6,1.9,7.8
c1.3,2.3,3.2,4.3,5.9,5.8c12.6,7.3,55.1,31.6,63.6,36.6c5.7,3.4,11.3,3.5,17,0.2c21.3-12.3,42.6-24.5,63.9-36.8
c2.6-1.5,4.6-3.5,5.9-5.8c1.2-2.3,1.9-4.9,1.9-7.8C230,187,230,137.7,230,113"
/>
<path
id="XMLID_4_"
className="csharp2"
d="M150.2,149.8l-78.4,45.1c1.3,2.3,3.2,4.3,5.9,5.8c12.6,7.3,55.1,31.6,63.6,36.6
c5.7,3.4,11.3,3.5,17,0.2c21.3-12.3,42.6-24.5,63.9-36.8c2.6-1.5,4.6-3.5,5.9-5.8L150.2,149.8"
/>
<path
id="XMLID_5_"
className="csharp2"
d="M127,163.1c4.6,8,13.1,13.3,23,13.3c9.9,0,18.5-5.4,23.1-13.5l-22.8-13.2L127,163.1"
/>
<path
id="XMLID_6_"
className="csharp3"
d="M230,113c0-3-0.6-5.7-2-8l-77.8,44.8l77.9,45.1c1.2-2.3,1.9-4.9,1.9-7.8
C230,187,230,137.7,230,113"
/>
<path
id="XMLID_9_"
className="csharp4"
d="M173.1,163c-4.5,8-13.2,13.5-23.1,13.5c-9.8,0-18.4-5.4-23-13.3c-2.2-3.9-3.5-8.3-3.5-13.1
c0-14.6,11.8-26.5,26.5-26.5c9.8,0,18.3,5.3,22.9,13.2l23.1-13.3c-9.2-15.9-26.4-26.5-46-26.5c-29.3,0-53.1,23.8-53.1,53.1
c0,9.6,2.6,18.7,7,26.4c9.2,16,26.4,26.7,46.1,26.7c19.8,0,37-10.8,46.1-26.8L173.1,163"
/>
<g id="XMLID_32_">
<rect
id="XMLID_1_"
x="197"
y="137.4"
className="csharp4"
width="5.3"
height="25.5"
/>
<rect
id="XMLID_30_"
x="208.7"
y="137.4"
className="csharp4"
width="5.3"
height="25.5"
/>
<rect
id="XMLID_31_"
x="202.9"
y="131.6"
transform="matrix(-1.836970e-16 1 -1 -1.836970e-16 349.8133 -61.1451)"
className="csharp4"
width="5.3"
height="25.5"
/>
<rect
id="XMLID_23_"
x="202.9"
y="143.3"
transform="matrix(-1.836970e-16 1 -1 -1.836970e-16 361.4733 -49.4852)"
className="csharp4"
width="5.3"
height="25.5"
/>
</g>
</g>
</svg>
)
}
export default CSharpLogo

View File

@ -0,0 +1,18 @@
// Libraries
import React, {FC} from 'react'
import classnames from 'classnames'
interface Props {
className?: string
}
export const GithubLogo: FC<Props> = ({className}) => (
<svg
className={classnames('github-logo', className)}
role="img"
viewBox="0 0 24 24"
>
<title>GitHub icon</title>
<path d="M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12" />
</svg>
)

View File

@ -0,0 +1,87 @@
// Libraries
import React, {SFC} from 'react'
/* eslint-disable no-mixed-spaces-and-tabs */
const GoLogo: SFC = () => {
return (
<svg
width="100%"
height="100%"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 300 300"
xmlSpace="preserve"
>
<style>
{`
.go0{fill:none;}
.go1{fill:#2DBCAF;}
`}
</style>
<rect className="go0" width={300} height={300} />
<g>
<g>
<g>
<g>
<path
className="go1"
d="M32.1,130.4c-0.5,0-0.6-0.3-0.4-0.6l2.7-3.5c0.3-0.4,0.9-0.6,1.4-0.6h46.2c0.5,0,0.6,0.4,0.4,0.8l-2.2,3.4
c-0.3,0.4-0.9,0.8-1.3,0.8L32.1,130.4z"
/>
</g>
</g>
</g>
<g>
<g>
<g>
<path
className="go1"
d="M12.5,142.3c-0.5,0-0.6-0.3-0.4-0.6l2.7-3.5c0.3-0.4,0.9-0.6,1.4-0.6h59.1c0.5,0,0.8,0.4,0.6,0.8l-1,3.1
c-0.1,0.5-0.6,0.8-1.2,0.8L12.5,142.3z"
/>
</g>
</g>
</g>
<g>
<g>
<g>
<path
className="go1"
d="M43.9,154.2c-0.5,0-0.6-0.4-0.4-0.8l1.8-3.2c0.3-0.4,0.8-0.8,1.3-0.8h25.9c0.5,0,0.8,0.4,0.8,0.9l-0.3,3.1
c0,0.5-0.5,0.9-0.9,0.9L43.9,154.2z"
/>
</g>
</g>
</g>
<g>
<g id="CXHf1q_1_">
<g>
<g>
<path
className="go1"
d="M178.3,128.1c-8.2,2.1-13.7,3.6-21.8,5.7c-1.9,0.5-2.1,0.6-3.8-1.3c-1.9-2.2-3.4-3.6-6.1-4.9
c-8.2-4-16.1-2.8-23.4,1.9c-8.8,5.7-13.3,14.1-13.2,24.6c0.1,10.4,7.3,18.9,17.5,20.3c8.8,1.2,16.2-1.9,22-8.5
c1.2-1.4,2.2-3,3.5-4.8c-4.7,0-10.5,0-25,0c-2.7,0-3.4-1.7-2.5-3.9c1.7-4,4.8-10.7,6.6-14.1c0.4-0.8,1.3-2.1,3.2-2.1
c6.6,0,31,0,47.1,0c-0.3,3.5-0.3,7-0.8,10.5c-1.4,9.3-4.9,17.9-10.6,25.4c-9.3,12.3-21.5,19.9-36.9,22
c-12.7,1.7-24.5-0.8-34.8-8.5c-9.6-7.3-15-16.8-16.4-28.8c-1.7-14.1,2.5-26.8,11-37.9c9.2-12,21.4-19.7,36.3-22.4
c12.2-2.2,23.8-0.8,34.3,6.3c6.9,4.5,11.8,10.7,15,18.3C180.4,127,179.8,127.7,178.3,128.1z"
/>
</g>
<g>
<path
className="go1"
d="M221.2,199.7c-11.8-0.3-22.5-3.6-31.6-11.4c-7.6-6.6-12.4-15-14-25c-2.3-14.6,1.7-27.6,10.5-39.1
c9.5-12.4,20.9-18.9,36.3-21.6c13.2-2.3,25.6-1,36.9,6.6c10.2,7,16.6,16.4,18.3,28.9c2.2,17.5-2.8,31.7-14.9,43.9
c-8.5,8.7-19,14.1-31.1,16.6C228,199.2,224.5,199.3,221.2,199.7z M252,147.4c-0.1-1.7-0.1-3-0.4-4.3
c-2.3-12.8-14.1-20.1-26.4-17.2c-12,2.7-19.8,10.4-22.7,22.5c-2.3,10.1,2.6,20.3,11.9,24.5c7.1,3.1,14.2,2.7,21.1-0.8
C245.8,166.8,251.3,158.5,252,147.4z"
/>
</g>
</g>
</g>
</g>
</g>
</svg>
)
}
/* eslint-enable */
export default GoLogo

View File

@ -0,0 +1,152 @@
// Libraries
import React, {SFC} from 'react'
/* eslint-disable no-mixed-spaces-and-tabs */
const JSLogo: SFC = () => {
return (
<svg
width="100"
height="100"
xmlns="http://www.w3.org/2000/svg"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 300 300"
>
<style>
{`
.js0{fill:none;}
.js1{fill:#699F63;}
.js2{fill-rule:evenodd;clip-rule:evenodd;fill:#FFFFFF;}
.js3{fill-rule:evenodd;clip-rule:evenodd;fill:#699F63;}
.js4{clip-path:url(#XMLID_5_);}
.js5{fill:#699F63;}
.js6{fill:#699F63;}
.js7{fill:#699F63;}
.js8{fill:#699F63;}
.js9{fill:#699F63;}
.js10{fill:#699F63;}
`}
</style>
<rect className="js0" width={300} height={300} />
<g>
<g>
<path
className="js1"
d="M148.5,235.7c-0.9,0-1.8-0.2-2.6-0.7l-8.1-4.8c-1.2-0.7-0.6-0.9-0.2-1.1c1.6-0.6,1.9-0.7,3.7-1.7
c0.2-0.1,0.4-0.1,0.6,0l6.3,3.7c0.2,0.1,0.5,0.1,0.8,0l24.4-14.1c0.2-0.1,0.4-0.4,0.4-0.7v-28.1c0-0.3-0.1-0.5-0.4-0.7l-24.4-14.1
c-0.2-0.1-0.5-0.1-0.8,0l-24.4,14.1c-0.2,0.1-0.4,0.4-0.4,0.7v28.1c0,0.3,0.1,0.5,0.4,0.7l6.7,3.9c3.6,1.8,5.8-0.3,5.8-2.5v-27.8
c0-0.4,0.3-0.7,0.7-0.7h3.1c0.4,0,0.7,0.3,0.7,0.7v27.8c0,4.8-2.6,7.6-7.2,7.6c-1.4,0-2.5,0-5.6-1.5l-6.4-3.7
c-1.6-0.9-2.6-2.6-2.6-4.4v-28.1c0-1.8,1-3.5,2.6-4.4l24.4-14.1c1.5-0.9,3.6-0.9,5.1,0l24.4,14.1c1.6,0.9,2.6,2.6,2.6,4.4v28.1
c0,1.8-1,3.5-2.6,4.4L151,235C150.2,235.4,149.4,235.7,148.5,235.7z"
/>
<path
className="js1"
d="M156,216.3c-10.7,0-12.9-4.9-12.9-9c0-0.4,0.3-0.7,0.7-0.7h3.2c0.4,0,0.6,0.3,0.7,0.6
c0.5,3.2,1.9,4.8,8.3,4.8c5.1,0,7.3-1.2,7.3-3.9c0-1.6-0.6-2.7-8.6-3.5c-6.7-0.7-10.8-2.1-10.8-7.5c0-4.9,4.1-7.9,11.1-7.9
c7.8,0,11.7,2.7,12.2,8.5c0,0.2-0.1,0.4-0.2,0.5c-0.1,0.1-0.3,0.2-0.5,0.2h-3.2c-0.3,0-0.6-0.2-0.7-0.6c-0.8-3.4-2.6-4.5-7.6-4.5
c-5.6,0-6.3,2-6.3,3.4c0,1.8,0.8,2.3,8.3,3.3c7.5,1,11.1,2.4,11.1,7.7C168.1,213.3,163.7,216.3,156,216.3z"
/>
</g>
<g>
<path
className="js1"
d="M191.1,186.8c0,2.6-2.1,4.7-4.7,4.7c-2.6,0-4.7-2.1-4.7-4.7c0-2.7,2.2-4.7,4.7-4.7
C189,182.1,191.1,184.1,191.1,186.8z M182.5,186.8c0,2.2,1.8,4,3.9,4c2.2,0,4-1.8,4-4c0-2.2-1.8-3.9-4-3.9
C184.3,182.8,182.5,184.6,182.5,186.8z M184.7,184.1h1.8c0.6,0,1.8,0,1.8,1.4c0,1-0.6,1.2-1,1.3c0.7,0,0.8,0.5,0.9,1.2
c0.1,0.4,0.1,1.1,0.3,1.4h-1.1c0-0.2-0.2-1.6-0.2-1.7c-0.1-0.3-0.2-0.4-0.5-0.4h-0.9v2.1h-1V184.1z M185.7,186.5h0.8
c0.7,0,0.8-0.5,0.8-0.7c0-0.7-0.5-0.7-0.8-0.7h-0.8V186.5z"
/>
</g>
<path
className="js2"
d="M72.9,125.6c0-1.1-0.6-2.2-1.6-2.7L45.5,108c-0.4-0.3-0.9-0.4-1.4-0.4c0,0-0.2,0-0.3,0c-0.5,0-1,0.2-1.4,0.4
l-25.9,14.9c-1,0.6-1.6,1.6-1.6,2.7l0.1,40.1c0,0.6,0.3,1.1,0.8,1.3c0.5,0.3,1.1,0.3,1.6,0l15.4-8.8c1-0.6,1.6-1.6,1.6-2.7v-18.7
c0-1.1,0.6-2.1,1.6-2.7l6.5-3.8c0.5-0.3,1-0.4,1.6-0.4c0.5,0,1.1,0.1,1.6,0.4l6.5,3.8c1,0.6,1.6,1.6,1.6,2.7v18.7
c0,1.1,0.6,2.1,1.6,2.7l15.4,8.8c0.5,0.3,1.1,0.3,1.6,0c0.5-0.3,0.8-0.8,0.8-1.3L72.9,125.6z"
/>
<path
className="js2"
d="M197.3,70.5c-0.5-0.3-1.1-0.3-1.6,0c-0.5,0.3-0.8,0.8-0.8,1.3v39.7c0,0.4-0.2,0.8-0.5,0.9
c-0.3,0.2-0.8,0.2-1.1,0l-6.5-3.7c-1-0.6-2.2-0.6-3.1,0l-25.9,14.9c-1,0.6-1.6,1.6-1.6,2.7v29.9c0,1.1,0.6,2.1,1.6,2.7l25.9,14.9
c1,0.6,2.2,0.6,3.1,0l25.9-14.9c1-0.6,1.6-1.6,1.6-2.7V81.8c0-1.1-0.6-2.2-1.6-2.7L197.3,70.5z M194.9,146.4c0,0.3-0.1,0.5-0.4,0.7
l-8.9,5.1c-0.2,0.1-0.5,0.1-0.8,0l-8.9-5.1c-0.2-0.1-0.4-0.4-0.4-0.7v-10.3c0-0.3,0.1-0.5,0.4-0.7l8.9-5.1c0.2-0.1,0.5-0.1,0.8,0
l8.9,5.1c0.2,0.1,0.4,0.4,0.4,0.7V146.4z"
/>
<g>
<path
className="js2"
d="M283.4,135.9c1-0.6,1.6-1.6,1.6-2.7V126c0-1.1-0.6-2.1-1.6-2.7l-25.7-14.9c-1-0.6-2.2-0.6-3.1,0l-25.9,14.9
c-1,0.6-1.6,1.6-1.6,2.7v29.8c0,1.1,0.6,2.2,1.6,2.7l25.7,14.6c0.9,0.5,2.1,0.5,3.1,0l15.5-8.6c0.5-0.3,0.8-0.8,0.8-1.4
c0-0.6-0.3-1.1-0.8-1.4l-26-14.9c-0.5-0.3-0.8-0.8-0.8-1.4v-9.4c0-0.6,0.3-1.1,0.8-1.4l8.1-4.7c0.5-0.3,1.1-0.3,1.6,0l8.1,4.7
c0.5,0.3,0.8,0.8,0.8,1.4v7.4c0,0.6,0.3,1.1,0.8,1.4c0.5,0.3,1.1,0.3,1.6,0L283.4,135.9z"
/>
<path
className="js3"
d="M255.8,134.5c0.2-0.1,0.4-0.1,0.6,0l5,2.9c0.2,0.1,0.3,0.3,0.3,0.5v5.7c0,0.2-0.1,0.4-0.3,0.5l-5,2.9
c-0.2,0.1-0.4,0.1-0.6,0l-5-2.9c-0.2-0.1-0.3-0.3-0.3-0.5v-5.7c0-0.2,0.1-0.4,0.3-0.5L255.8,134.5z"
/>
</g>
<g>
<path
className="js1"
d="M116.2,108.5c-1-0.6-2.1-0.6-3.1,0l-25.7,14.8c-1,0.6-1.6,1.6-1.6,2.7v29.7c0,1.1,0.6,2.1,1.6,2.7
l25.7,14.9c1,0.6,2.1,0.6,3.1,0l25.7-14.9c1-0.6,1.6-1.6,1.6-2.7v-29.7c0-1.1-0.6-2.1-1.6-2.7L116.2,108.5z"
/>
<g className="js4">
<path
className="js0"
d="M113,108.5l-25.8,14.8c-1,0.6-1.7,1.6-1.7,2.7v29.7c0,0.7,0.3,1.4,0.8,2l29-49.5
C114.6,108,113.8,108.1,113,108.5z"
/>
<path
className="js0"
d="M115.5,173.6c0.3-0.1,0.5-0.2,0.7-0.3l25.7-14.9c1-0.6,1.6-1.6,1.6-2.7v-29.7c0-0.8-0.4-1.6-0.9-2.2
L115.5,173.6z"
/>
<path
className="js5"
d="M142,123.4l-25.8-14.8c-0.3-0.1-0.5-0.3-0.8-0.3l-29,49.5c0.2,0.3,0.5,0.5,0.9,0.7l25.8,14.9
c0.7,0.4,1.6,0.5,2.4,0.3l27.1-49.7C142.5,123.7,142.2,123.5,142,123.4z"
/>
</g>
<g className="js4">
<path
className="js0"
d="M143.6,155.8v-29.7c0-1.1-0.7-2.1-1.6-2.7l-25.8-14.8c-0.3-0.2-0.6-0.3-0.9-0.3l28.3,48.3
C143.6,156.2,143.6,156,143.6,155.8z"
/>
<path
className="js0"
d="M87.3,123.4c-1,0.6-1.7,1.6-1.7,2.7v29.7c0,1.1,0.7,2.1,1.7,2.7l25.8,14.9c0.6,0.4,1.3,0.5,2,0.4l-27.7-50.4
L87.3,123.4z"
/>
<polygon
className="js6"
points="112.2,103.1 111.9,103.3 112.4,103.3"
/>
<path
className="js7"
d="M142,158.4c0.7-0.4,1.3-1.2,1.5-2l-28.3-48.3c-0.7-0.1-1.5,0-2.2,0.3l-25.6,14.8l27.7,50.4
c0.4-0.1,0.8-0.2,1.1-0.4L142,158.4z"
/>
<polygon
className="js8"
points="144.4,158 144.2,157.7 144.2,158.1"
/>
<path
className="js9"
d="M142,158.4l-25.7,14.9c-0.4,0.2-0.7,0.3-1.1,0.4l0.5,0.9l28.6-16.6v-0.4l-0.7-1.2
C143.3,157.3,142.7,158,142,158.4z"
/>
<path
className="js10"
d="M142,158.4l-25.7,14.9c-0.4,0.2-0.7,0.3-1.1,0.4l0.5,0.9l28.6-16.6v-0.4l-0.7-1.2
C143.3,157.3,142.7,158,142,158.4z"
/>
</g>
</g>
</g>
</svg>
)
}
/* eslint-enable */
export default JSLogo

View File

@ -0,0 +1,106 @@
// Libraries
import React, {SFC} from 'react'
const JavaLogo: SFC = () => {
return (
<svg
width="100"
height="100"
xmlns="http://www.w3.org/2000/svg"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 300 300"
>
<style>
{`
.java0{fill:none;}
.java1{fill:#0074BD;}
.java2{fill:#EA2D2E;}
`}
</style>
<rect className="java0" width="300" height="300" />
<g>
<path
id="XMLID_25_"
className="java1"
d="M128.3,151.2c0,0-6.2,3.6,4.4,4.9c12.9,1.5,19.6,1.3,33.8-1.4c0,0,3.8,2.3,9,4.4
C143.6,172.8,103.2,158.3,128.3,151.2L128.3,151.2z"
/>
<path
id="XMLID_24_"
className="java1"
d="M124.4,133.4c0,0-7,5.2,3.7,6.3c13.8,1.4,24.7,1.5,43.6-2.1c0,0,2.6,2.6,6.7,4.1
C139.8,153,96.8,142.5,124.4,133.4L124.4,133.4z"
/>
<path
id="XMLID_22_"
className="java2"
d="M157.3,103c7.9,9.1-2.1,17.2-2.1,17.2s20-10.3,10.8-23.3c-8.6-12.1-15.2-18,20.5-38.7
C186.5,58.3,130.6,72.3,157.3,103L157.3,103z"
/>
<path
id="XMLID_21_"
className="java1"
d="M199.6,164.5c0,0,4.6,3.8-5.1,6.7c-18.4,5.6-76.8,7.3-93,0.2c-5.8-2.5,5.1-6.1,8.5-6.8
c3.6-0.8,5.6-0.6,5.6-0.6c-6.5-4.6-41.9,9-18,12.8C162.9,187.4,216.5,172.1,199.6,164.5L199.6,164.5z"
/>
<path
id="XMLID_20_"
className="java1"
d="M131.3,114.8c0,0-29.7,7.1-10.5,9.6c8.1,1.1,24.2,0.8,39.3-0.4c12.3-1,24.6-3.2,24.6-3.2
s-4.3,1.9-7.5,4c-30.1,7.9-88.4,4.2-71.6-3.9C119.8,114.1,131.3,114.8,131.3,114.8L131.3,114.8z"
/>
<path
id="XMLID_19_"
className="java1"
d="M184.6,144.6c30.6-15.9,16.5-31.2,6.6-29.2c-2.4,0.5-3.5,0.9-3.5,0.9s0.9-1.4,2.6-2
c19.6-6.9,34.6,20.3-6.3,31C183.9,145.4,184.4,145,184.6,144.6L184.6,144.6z"
/>
<path
id="XMLID_18_"
className="java2"
d="M166.1,25c0,0,17,17-16.1,43.1c-26.5,20.9-6,32.9,0,46.5c-15.5-14-26.8-26.3-19.2-37.7
C142,60.1,172.9,51.9,166.1,25L166.1,25z"
/>
<path
id="XMLID_17_"
className="java1"
d="M134.3,187.7c29.4,1.9,74.6-1,75.6-15c0,0-2.1,5.3-24.3,9.5c-25.1,4.7-56.1,4.2-74.4,1.1
C111.2,183.4,115,186.5,134.3,187.7L134.3,187.7z"
/>
<path
className="java2"
d="M208.6,212.7h-0.9v-0.5h2.4v0.5h-0.9v2.5h-0.6L208.6,212.7L208.6,212.7L208.6,212.7z M213.4,212.8L213.4,212.8
l-0.9,2.4h-0.4l-0.9-2.4h0v2.4h-0.6v-3h0.9l0.8,2.1l0.8-2.1h0.9v3h-0.6L213.4,212.8L213.4,212.8z"
/>
<path
className="java2"
d="M129.5,240.9c-2.8,2.4-5.7,3.8-8.3,3.8c-3.7,0-5.8-2.3-5.8-5.9c0-3.9,2.2-6.8,10.9-6.8h3.2V240.9L129.5,240.9
L129.5,240.9z M137.2,249.5v-26.7c0-6.8-3.9-11.3-13.3-11.3c-5.5,0-10.3,1.4-14.2,3.1l1.1,4.7c3.1-1.1,7.1-2.2,11-2.2
c5.4,0,7.7,2.2,7.7,6.7v3.4h-2.7c-13.1,0-19.1,5.1-19.1,12.8c0,6.6,3.9,10.4,11.3,10.4c4.7,0,8.3-2,11.6-4.8l0.6,4.1H137.2
L137.2,249.5z"
/>
<path
id="XMLID_12_"
className="java2"
d="M162.7,249.5h-9.5l-11.5-37.4h8.3l7.1,23l1.6,6.9c3.6-10,6.2-20.1,7.4-29.9h8.1
C172.1,224.5,168.2,238,162.7,249.5L162.7,249.5z"
/>
<path
className="java2"
d="M199.3,240.9c-2.8,2.4-5.7,3.8-8.3,3.8c-3.8,0-5.8-2.3-5.8-5.9c0-3.9,2.2-6.8,10.9-6.8h3.2L199.3,240.9
L199.3,240.9L199.3,240.9z M207,249.5v-26.7c0-6.8-3.9-11.3-13.3-11.3c-5.5,0-10.3,1.4-14.2,3.1l1.1,4.7c3.1-1.1,7.1-2.2,11-2.2
c5.4,0,7.7,2.2,7.7,6.7v3.4h-2.7c-13.1,0-19.1,5.1-19.1,12.8c0,6.6,3.9,10.4,11.3,10.4c4.7,0,8.3-2,11.6-4.8l0.6,4.1H207L207,249.5
z"
/>
<path
id="XMLID_8_"
className="java2"
d="M99.3,255.9c-2.2,3.2-5.7,5.7-9.6,7.1l-3.8-4.5c2.9-1.5,5.5-3.9,6.6-6.2c1-2,1.4-4.6,1.4-10.8
v-42.7h8.1V241C102.2,249.3,101.5,252.6,99.3,255.9L99.3,255.9z"
/>
</g>
</svg>
)
}
export default JavaLogo

View File

@ -0,0 +1,65 @@
// Libraries
import React, {SFC} from 'react'
const KotlinLogo: SFC = () => {
return (
<svg
viewBox="0 0 60 60"
xmlSpace="preserve"
preserveAspectRatio="xMidYMid meet"
width="80"
height="80"
>
<linearGradient
id="kotlin-a"
gradientUnits="userSpaceOnUse"
x1={15.959}
y1={-13.014}
x2={44.307}
y2={15.333}
gradientTransform="matrix(1 0 0 -1 0 61)"
>
<stop offset={0.097} stopColor="#0095d5" />
<stop offset={0.301} stopColor="#238ad9" />
<stop offset={0.621} stopColor="#557bde" />
<stop offset={0.864} stopColor="#7472e2" />
<stop offset={1} stopColor="#806ee3" />
</linearGradient>
<path fill="url(#kotlin-a)" d="M0 60L30.1 29.9 60 60z" />
<linearGradient
id="kotlin-b"
gradientUnits="userSpaceOnUse"
x1={4.209}
y1={48.941}
x2={20.673}
y2={65.405}
gradientTransform="matrix(1 0 0 -1 0 61)"
>
<stop offset={0.118} stopColor="#0095d5" />
<stop offset={0.418} stopColor="#3c83dc" />
<stop offset={0.696} stopColor="#6d74e1" />
<stop offset={0.833} stopColor="#806ee3" />
</linearGradient>
<path fill="url(#kotlin-b)" d="M0 0L30.1 0 0 32.5z" />
<linearGradient
id="kotlin-c"
gradientUnits="userSpaceOnUse"
x1={-10.102}
y1={5.836}
x2={45.731}
y2={61.669}
gradientTransform="matrix(1 0 0 -1 0 61)"
>
<stop offset={0.107} stopColor="#c757bc" />
<stop offset={0.214} stopColor="#d0609a" />
<stop offset={0.425} stopColor="#e1725c" />
<stop offset={0.605} stopColor="#ee7e2f" />
<stop offset={0.743} stopColor="#f58613" />
<stop offset={0.823} stopColor="#f88909" />
</linearGradient>
<path fill="url(#kotlin-c)" d="M30.1 0L0 31.7 0 60 30.1 29.9 60 0z" />
</svg>
)
}
export default KotlinLogo

View File

@ -0,0 +1,36 @@
// Libraries
import React, {SFC} from 'react'
const PHPLogo: SFC = () => {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
enableBackground="new 0 0 100 100"
viewBox="0 0 100 100"
xmlSpace="preserve"
preserveAspectRatio="xMidYMid meet"
>
<style>{`.st0{fill:#FFFFFF;}`}</style>
<g transform="translate(-44.632 -141.55)">
<g transform="translate(-44.632 -141.55)">
<g transform="matrix(8.3528 0 0 8.3119 -727.13 -3759.5)">
<path
d="M98.7 490.6h1.8c.5 0 .9.2 1.1.5.2.3.3.7.2 1.2 0 .2-.1.5-.2.7-.1.2-.2.4-.4.6-.2.2-.5.4-.7.4-.3.1-.5.1-.8.1h-.8l-.3 1.3h-.9l1-4.8m.7.8l-.4 2h.2c.4 0 .8 0 1.1-.1.3-.1.5-.4.6-1 .1-.5 0-.7-.2-.8-.2-.1-.5-.1-.9-.1h-.4"
className="st0"
/>
<path
d="M102.9 489.4h.9l-.3 1.3h.8c.5 0 .8.1 1 .3.2.2.3.5.2 1l-.4 2.2h-.9l.4-2.1v-.5c0-.1-.2-.1-.5-.1h-.7l-.5 2.7h-.9l.9-4.8"
className="st0"
/>
<path
d="M106.5 490.6h1.8c.5 0 .9.2 1.1.5.2.3.3.7.2 1.2 0 .2-.1.5-.2.7-.1.2-.2.4-.4.6-.2.2-.5.4-.7.4-.3.1-.5.1-.8.1h-.8l-.3 1.3h-.9l1-4.8m.8.8l-.4 2h.2c.4 0 .8 0 1.1-.1.3-.1.5-.4.6-1 .1-.5 0-.7-.2-.8-.2-.1-.5-.1-.9-.1h-.4"
className="st0"
/>
</g>
</g>
</g>
</svg>
)
}
export default PHPLogo

View File

@ -0,0 +1,45 @@
// Libraries
import React, {SFC} from 'react'
/* eslint-disable no-mixed-spaces-and-tabs */
const PythonLogo: SFC = () => {
return (
<svg
width="100"
height="100"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 300 300"
preserveAspectRatio="xMidYMid meet"
>
<style>
{`
.python0{fill:none;}
.python1{fill:#5A9FD4;}
.python2{fill:#FFD43B;}
`}
</style>
<rect className="python0" width={300} height={300} />
<g>
<path
id="path1948"
className="python1"
d="M149,65c-6.9,0-13.6,0.6-19.4,1.7c-17.2,3-20.3,9.4-20.3,21.1v15.5h40.6v5.2h-40.6H94.1
c-11.8,0-22.1,7.1-25.3,20.6c-3.7,15.4-3.9,25.1,0,41.2c2.9,12,9.8,20.6,21.6,20.6h13.9v-18.5c0-13.4,11.6-25.2,25.3-25.2h40.5
c11.3,0,20.3-9.3,20.3-20.6V87.7c0-11-9.3-19.2-20.3-21.1C163.2,65.5,155.9,65,149,65z M127.1,77.4c4.2,0,7.6,3.5,7.6,7.8
c0,4.3-3.4,7.7-7.6,7.7c-4.2,0-7.6-3.4-7.6-7.7C119.5,80.9,122.9,77.4,127.1,77.4z"
/>
<path
id="path1950"
className="python2"
d="M195.5,108.3v18c0,14-11.8,25.7-25.3,25.7h-40.5c-11.1,0-20.3,9.5-20.3,20.6v38.6
c0,11,9.6,17.5,20.3,20.6c12.8,3.8,25.1,4.5,40.5,0c10.2-3,20.3-8.9,20.3-20.6v-15.5h-40.5v-5.2h40.5h20.3
c11.8,0,16.2-8.2,20.3-20.6c4.2-12.7,4.1-24.9,0-41.2c-2.9-11.7-8.5-20.6-20.3-20.6H195.5z M172.7,206.1c4.2,0,7.6,3.4,7.6,7.7
c0,4.3-3.4,7.8-7.6,7.8c-4.2,0-7.6-3.5-7.6-7.8C165.1,209.6,168.5,206.1,172.7,206.1z"
/>
</g>
</svg>
)
}
/* eslint-enable */
export default PythonLogo

View File

@ -0,0 +1,37 @@
// Libraries
import React, {SFC} from 'react'
const RubyLogo: SFC = () => {
return (
<svg
version="1.1"
id="Layer_1"
viewBox="0 0 48 48"
enableBackground="new 0 0 48 48"
xmlSpace="preserve"
preserveAspectRatio="xMidYMid meet"
>
<polygon fill="#9B1010" points="42,14 40,40 14,42 " />
<polygon fill="#B71C1C" points="28,28 40,40 42,14 " />
<ellipse
transform="matrix(0.7071 -0.7071 0.7071 0.7071 -9.9411 24)"
fill="#C62828"
cx="24"
cy="24"
rx="22.621"
ry="11.664"
/>
<polygon
fill="#E53935"
points="10,17 17,10 25,6 31,10 28,19 19,27 10,30 6,24 "
/>
<path
fill="#FF5252"
d="M31,10l-6-4h11L31,10z M42,15l-11-5l-3,9L42,15z M19,27l13.235,5.235L28,19L19,27z M10,30l4,12l5-15L10,30z
M6,24v11l4-5L6,24z"
/>
</svg>
)
}
export default RubyLogo

View File

@ -0,0 +1,94 @@
// Libraries
import React, {SFC} from 'react'
const ScalaLogo: SFC = () => {
return (
<svg
viewBox="0 0 64 64"
height={100}
width={100}
preserveAspectRatio="xMidYMid meet"
>
<linearGradient id="scala-a">
<stop offset={0} stopColor="#656565" />
<stop offset={1} stopColor="#010101" />
</linearGradient>
<linearGradient
id="scala-c"
gradientUnits="userSpaceOnUse"
x1={13.528}
x2={88.264}
xlinkHref="#scala-a"
y1={-36.176}
y2={-36.176}
/>
<linearGradient
id="scala-d"
gradientUnits="userSpaceOnUse"
x1={13.528}
x2={88.264}
xlinkHref="#scala-a"
y1={3.91}
y2={3.91}
/>
<linearGradient id="scala-b">
<stop offset={0} stopColor="#9f1c20" />
<stop offset={1} stopColor="#ed2224" />
</linearGradient>
<linearGradient
id="scala-e"
gradientUnits="userSpaceOnUse"
x1={13.528}
x2={88.264}
xlinkHref="#scala-b"
y1={-55.974}
y2={-55.974}
/>
<linearGradient
id="scala-f"
gradientUnits="userSpaceOnUse"
x1={13.528}
x2={88.264}
xlinkHref="#scala-b"
y1={-15.87}
y2={-15.87}
/>
<linearGradient
id="scala-g"
gradientUnits="userSpaceOnUse"
x1={13.528}
x2={88.264}
xlinkHref="#scala-b"
y1={24.22}
y2={24.22}
/>
<path
d="M13.4-31s75 7.5 75 20v-30s0-12.5-75-20z"
fill="url(#scala-d)"
transform="matrix(.4923 0 0 .4923 6.942 39.877)"
/>
<path
d="M13.4 9s75 7.5 75 20V-1s0-12.5-75-20z"
fill="url(#scala-d)"
transform="matrix(.4923 0 0 .4923 6.942 39.877)"
/>
<path
d="M88.4-81v30s0 12.5-75 20v-30s75-7.5 75-20"
fill="url(#scala-e)"
transform="matrix(.4923 0 0 .4923 6.942 39.877)"
/>
<path
d="M13.4-21s75-7.5 75-20v30s0 12.5-75 20z"
fill="url(#scala-f)"
transform="matrix(.4923 0 0 .4923 6.942 39.877)"
/>
<path
d="M13.4 49V19s75-7.5 75-20v30s0 12.5-75 20"
fill="url(#scala-g)"
transform="matrix(.4923 0 0 .4923 6.942 39.877)"
/>
</svg>
)
}
export default ScalaLogo

View File

@ -0,0 +1,27 @@
import ArduinoLogo from 'src/clientLibraries/graphics/ArduinoLogo'
import CSharpLogo from 'src/clientLibraries/graphics/CSharpLogo'
import {GithubLogo} from 'src/clientLibraries/graphics/GithubLogo'
import GoLogo from 'src/clientLibraries/graphics/GoLogo'
import {GoogleLogo} from 'src/clientLibraries/graphics/GoogleLogo'
import JavaLogo from 'src/clientLibraries/graphics/JavaLogo'
import JSLogo from 'src/clientLibraries/graphics/JSLogo'
import KotlinLogo from 'src/clientLibraries/graphics/KotlinLogo'
import PHPLogo from 'src/clientLibraries/graphics/PHPLogo'
import PythonLogo from 'src/clientLibraries/graphics/PythonLogo'
import RubyLogo from 'src/clientLibraries/graphics/RubyLogo'
import ScalaLogo from 'src/clientLibraries/graphics/ScalaLogo'
export {
ArduinoLogo,
CSharpLogo,
GithubLogo,
GoLogo,
GoogleLogo,
JavaLogo,
JSLogo,
KotlinLogo,
PHPLogo,
PythonLogo,
RubyLogo,
ScalaLogo,
}

View File

@ -21,7 +21,7 @@ import auth0js, {WebAuth} from 'auth0-js'
// Components
import {LoginForm} from 'src/onboarding/components/LoginForm'
import {SocialButton} from 'src/shared/components/SocialButton'
import {GoogleLogo} from 'src/shared/graphics/GoogleLogo'
import {GoogleLogo} from 'src/clientLibraries/graphics'
// Types
import {Auth0Connection, FormFieldValidation} from 'src/types'

View File

@ -43,19 +43,10 @@ export const generateNavItems = (orgID: string): NavItem[] => {
shortLabel: 'Data',
link: {
type: 'link',
location: `${orgPrefix}/load-data/sources`,
location: `${orgPrefix}/load-data/buckets`,
},
activeKeywords: ['load-data'],
menu: [
{
id: 'sources',
testID: 'nav-subitem-sources',
label: 'Sources',
link: {
type: 'link',
location: `${orgPrefix}/load-data/sources`,
},
},
{
id: 'buckets',
testID: 'nav-subitem-buckets',
@ -93,6 +84,15 @@ export const generateNavItems = (orgID: string): NavItem[] => {
location: `${orgPrefix}/load-data/tokens`,
},
},
{
id: 'client-libraries',
testID: 'nav-subitem-client-libraries',
label: 'Client Libraries',
link: {
type: 'link',
location: `${orgPrefix}/load-data/client-libraries`,
},
},
],
},
{

View File

@ -1,5 +1,6 @@
// Libraries
import React, {Component} from 'react'
import {connect} from 'react-redux'
import {Switch, Route} from 'react-router-dom'
// Components
@ -12,26 +13,33 @@ import CreateScraperOverlay from 'src/scrapers/components/CreateScraperOverlay'
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
import {getOrg} from 'src/organizations/selectors'
// Decorators
import {ErrorHandling} from 'src/shared/decorators/errors'
// Types
import {ResourceType} from 'src/types'
import {AppState, Organization, ResourceType} from 'src/types'
// Constants
import {ORGS, ORG_ID, SCRAPERS} from 'src/shared/constants/routes'
interface StateProps {
org: Organization
}
const scrapersPath = `/${ORGS}/${ORG_ID}/load-data/${SCRAPERS}`
@ErrorHandling
class ScrapersIndex extends Component {
class ScrapersIndex extends Component<StateProps> {
public render() {
const {org} = this.props
return (
<>
<Page titleTag={pageTitleSuffixer(['Scrapers', 'Load Data'])}>
<LoadDataHeader />
<LoadDataTabbedPage activeTab="scrapers">
<LoadDataTabbedPage activeTab="scrapers" orgID={org.id}>
<GetResources
resources={[ResourceType.Scrapers, ResourceType.Buckets]}
>
@ -50,4 +58,6 @@ class ScrapersIndex extends Component {
}
}
export default ScrapersIndex
const mstp = (state: AppState) => ({org: getOrg(state)})
export default connect(mstp)(ScrapersIndex)

View File

@ -28,12 +28,6 @@ class LoadDataNavigation extends PureComponent<Props> {
}
const tabs = [
{
text: 'Sources',
id: 'sources',
cloudExclude: false,
featureFlag: null,
},
{
text: 'Buckets',
id: 'buckets',
@ -58,6 +52,12 @@ class LoadDataNavigation extends PureComponent<Props> {
cloudExclude: false,
featureFlag: null,
},
{
text: 'Client Libraries',
id: 'client-libraries',
cloudExclude: false,
featureFlag: null,
},
]
const activeTabName = tabs.find(t => t.id === activeTab).text

View File

@ -1,44 +1,33 @@
// Libraries
import React, {FC, ReactNode} from 'react'
import React, {PureComponent} from 'react'
import _ from 'lodash'
import {connect, ConnectedProps} from 'react-redux'
// Components
import LoadDataNavigation from 'src/settings/components/LoadDataNavigation'
import {Tabs, Orientation, Page} from '@influxdata/clockface'
// Utils
import {getOrg} from 'src/organizations/selectors'
// Decorators
import {ErrorHandling} from 'src/shared/decorators/errors'
// Types
import {AppState} from 'src/types'
interface ComponentProps {
interface Props {
activeTab: string
children?: ReactNode
orgID: string
}
type StateProps = ConnectedProps<typeof connector>
@ErrorHandling
class LoadDataTabbedPage extends PureComponent<Props> {
public render() {
const {activeTab, orgID, children} = this.props
type Props = ComponentProps & StateProps
const LoadDataTabbedPage: FC<Props> = ({activeTab, orgID, children}) => {
return (
<Page.Contents fullWidth={false} scrollable={true}>
<Tabs.Container orientation={Orientation.Horizontal}>
<LoadDataNavigation activeTab={activeTab} orgID={orgID} />
<Tabs.TabContents>{children}</Tabs.TabContents>
</Tabs.Container>
</Page.Contents>
)
return (
<Page.Contents fullWidth={false} scrollable={true}>
<Tabs.Container orientation={Orientation.Horizontal}>
<LoadDataNavigation activeTab={activeTab} orgID={orgID} />
<Tabs.TabContents>{children}</Tabs.TabContents>
</Tabs.Container>
</Page.Contents>
)
}
}
const mstp = (state: AppState) => {
const org = getOrg(state)
return {orgID: org.id}
}
const connector = connect(mstp)
export default connector(LoadDataTabbedPage)
export default LoadDataTabbedPage

View File

@ -11,10 +11,9 @@ import CopyButton from 'src/shared/components/CopyButton'
export interface Props {
copyText: string
onCopyText?: (text?: string, status?: boolean) => Notification
onCopyText?: (text: string, status: boolean) => Notification
testID?: string
label: string
onClick?: () => void
}
@ErrorHandling
@ -24,7 +23,7 @@ class CodeSnippet extends PureComponent<Props> {
}
public render() {
const {copyText, label, onCopyText, onClick} = this.props
const {copyText, label, onCopyText} = this.props
const testID = this.props.testID || 'code-snippet'
return (
@ -45,7 +44,6 @@ class CodeSnippet extends PureComponent<Props> {
textToCopy={copyText}
onCopyText={onCopyText}
contentName="Script"
onClick={onClick}
/>
<label className="code-snippet--label">{label}</label>
</div>

View File

@ -4,13 +4,7 @@ import CopyToClipboard from 'react-copy-to-clipboard'
import {connect, ConnectedProps} from 'react-redux'
// Components
import {
Button,
ComponentColor,
ComponentSize,
ButtonShape,
IconFont,
} from '@influxdata/clockface'
import {Button, ComponentColor, ComponentSize} from '@influxdata/clockface'
// Constants
import {
@ -23,16 +17,11 @@ import {notify as notifyAction} from 'src/shared/actions/notifications'
import {Notification} from 'src/types'
interface OwnProps {
shape: ButtonShape
icon?: IconFont
buttonText: string
textToCopy: string
contentName: string // if copying a script, it's "script"
size: ComponentSize
color: ComponentColor
onCopyText?: (text: string, status: boolean) => Notification
testID: string
onClick?: () => void
}
type ReduxProps = ConnectedProps<typeof connector>
@ -40,33 +29,22 @@ type Props = OwnProps & ReduxProps
class CopyButton extends PureComponent<Props> {
public static defaultProps = {
shape: ButtonShape.Default,
buttonText: 'Copy to Clipboard',
size: ComponentSize.ExtraSmall,
color: ComponentColor.Secondary,
testID: 'button-copy',
}
public render() {
const {textToCopy, color, size, icon, shape, testID} = this.props
let buttonText = this.props.buttonText
if (shape === ButtonShape.Square) {
buttonText = undefined
}
const {textToCopy, color, size} = this.props
return (
<CopyToClipboard text={textToCopy} onCopy={this.handleCopyAttempt}>
<Button
shape={shape}
icon={icon}
size={size}
color={color}
titleText={buttonText}
text={buttonText}
titleText="Copy to Clipboard"
text="Copy to Clipboard"
onClick={this.handleClickCopy}
testID={testID}
testID="button-copy"
/>
</CopyToClipboard>
)
@ -80,11 +58,7 @@ class CopyButton extends PureComponent<Props> {
copiedText: string,
isSuccessful: boolean
): void => {
const {notify, onCopyText, onClick} = this.props
if (onClick) {
onClick()
}
const {notify, onCopyText} = this.props
if (onCopyText) {
notify(onCopyText(copiedText, isSuccessful))

View File

@ -28,8 +28,8 @@
font-weight: $cf-font-weight--medium;
}
h3:not(.cf-heading),
h4:not(.cf-heading) {
h3,
h4 {
border-bottom: $cf-border solid $g4-onyx;
padding-bottom: 0.25em;
}
@ -39,17 +39,17 @@
color: $g16-pearl;
}
table:not(.cf-table) {
table {
border-collapse: collapse;
}
td:not(.cf-table--cell),
th:not(.cf-table--header-cell) {
td,
th {
padding: $cf-marg-a $cf-marg-b;
border: $cf-border solid $g5-pepper;
}
th:not(.cf-table--header-cell) {
th {
background-color: $g5-pepper;
}

View File

@ -5,6 +5,8 @@ export const SIGNIN = '/signin'
export const BUCKETS = 'buckets'
export const BUCKET_ID = ':bucketID'
export const CLIENT_LIBS = 'client-libraries'
export const DASHBOARDS = 'dashboards'
export const DASHBOARD_ID = ':dashboardID'
@ -20,11 +22,3 @@ export const TELEGRAFS = 'telegrafs'
export const TOKENS = 'tokens'
export const VARIABLES = 'variables'
export const LOAD_DATA = 'load-data'
export const CLIENT_LIBS = 'client-libraries'
export const TELEGRAF_PLUGINS = 'telegraf-plugins'
export const DEVELOPER_TOOLS = 'developer-tools'
export const FLUX_SOURCES = 'flux-sources'
export const INTEGRATIONS = 'integrations'

View File

@ -17,7 +17,7 @@ import BucketsIndex from 'src/buckets/containers/BucketsIndex'
import TokensIndex from 'src/authorizations/containers/TokensIndex'
import TelegrafsPage from 'src/telegrafs/containers/TelegrafsPage'
import ScrapersIndex from 'src/scrapers/containers/ScrapersIndex'
import WriteDataPage from 'src/writeData/containers/WriteDataPage'
import ClientLibrariesPage from 'src/clientLibraries/containers/ClientLibrariesPage'
import VariablesIndex from 'src/variables/containers/VariablesIndex'
import TemplatesIndex from 'src/templates/containers/TemplatesIndex'
import LabelsIndex from 'src/labels/containers/LabelsIndex'
@ -27,19 +27,12 @@ import AlertHistoryIndex from 'src/alerting/components/AlertHistoryIndex'
import CheckHistory from 'src/checks/components/CheckHistory'
import MembersIndex from 'src/members/containers/MembersIndex'
import RouteToDashboardList from 'src/dashboards/components/RouteToDashboardList'
import ClientLibrariesPage from 'src/writeData/containers/ClientLibrariesPage'
import TelegrafPluginsPage from 'src/writeData/containers/TelegrafPluginsPage'
// Types
import {AppState, Organization, ResourceType} from 'src/types'
// Constants
import {CLOUD} from 'src/shared/constants'
import {
LOAD_DATA,
TELEGRAF_PLUGINS,
CLIENT_LIBS,
} from 'src/shared/constants/routes'
// Actions
import {setOrg} from 'src/organizations/actions/creators'
@ -139,42 +132,22 @@ const SetOrg: FC<Props> = ({
<Route path={`${orgPath}/notebooks`} component={NotebookPage} />
)}
{/* Write Data */}
{/* Load Data */}
<Route
path={`${orgPath}/${LOAD_DATA}/sources`}
component={WriteDataPage}
/>
<Route
path={`${orgPath}/${LOAD_DATA}/${CLIENT_LIBS}`}
path={`${orgPath}/load-data/client-libraries`}
component={ClientLibrariesPage}
/>
<Route
path={`${orgPath}/${LOAD_DATA}/${TELEGRAF_PLUGINS}`}
component={TelegrafPluginsPage}
/>
{/* Load Data */}
<Route
exact
path={`${orgPath}/${LOAD_DATA}`}
component={WriteDataPage}
/>
<Route
path={`${orgPath}/${LOAD_DATA}/scrapers`}
path={`${orgPath}/load-data/scrapers`}
component={ScrapersIndex}
/>
<Route
path={`${orgPath}/${LOAD_DATA}/telegrafs`}
path={`${orgPath}/load-data/telegrafs`}
component={TelegrafsPage}
/>
<Route
path={`${orgPath}/${LOAD_DATA}/tokens`}
component={TokensIndex}
/>
<Route
path={`${orgPath}/${LOAD_DATA}/buckets`}
component={BucketsIndex}
/>
<Route path={`${orgPath}/load-data/tokens`} component={TokensIndex} />
<Route path={`${orgPath}/load-data/buckets`} component={BucketsIndex} />
<Route exact path={`${orgPath}/load-data`} component={BucketsIndex} />
{/* Settings */}
<Route

View File

@ -16,11 +16,6 @@ export const OSS_FLAGS = {
'notebook-panel--spotify': false,
'notebook-panel--test-flux': false,
disableDefaultTableSort: false,
'load-data-client-libraries': true,
'load-data-telegraf-plugins': true,
'load-data-dev-tools': false,
'load-data-flux-sources': false,
'load-data-integrations': false,
'notification-endpoint-telegram': false,
}
@ -39,11 +34,6 @@ export const CLOUD_FLAGS = {
'notebook-panel--spotify': false,
'notebook-panel--test-flux': false,
disableDefaultTableSort: false,
'load-data-client-libraries': true,
'load-data-telegraf-plugins': true,
'load-data-dev-tools': false,
'load-data-flux-sources': false,
'load-data-integrations': false,
'notification-endpoint-telegram': false,
}

View File

@ -120,6 +120,7 @@
@import 'src/notifications/rules/components/RuleOverlayFooter.scss';
@import 'src/labels/components/LabelCard.scss';
@import 'src/alerting/components/SearchBar.scss';
@import 'src/clientLibraries/components/ClientLibraryOverlay.scss';
@import 'src/dashboards/components/DashboardsCardGrid.scss';
@import 'src/dashboards/components/DashboardLightMode.scss';
@import 'src/buckets/components/DemoDataDropdown.scss';

View File

@ -1,5 +1,6 @@
// Libraries
import React, {PureComponent} from 'react'
import {connect} from 'react-redux'
import {Route, Switch} from 'react-router-dom'
// Components
@ -33,24 +34,31 @@ const TelegrafOutputOverlay = RouteOverlay(
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
import {getOrg} from 'src/organizations/selectors'
// Types
import {ResourceType} from 'src/types'
import {AppState, Organization, ResourceType} from 'src/types'
// Constant
import {ORGS, ORG_ID, TELEGRAFS} from 'src/shared/constants/routes'
interface StateProps {
org: Organization
}
const telegrafsPath = `/${ORGS}/${ORG_ID}/load-data/${TELEGRAFS}`
@ErrorHandling
class TelegrafsPage extends PureComponent {
class TelegrafsPage extends PureComponent<StateProps> {
public render() {
const {org} = this.props
return (
<>
<Page titleTag={pageTitleSuffixer(['Telegraf', 'Load Data'])}>
<LimitChecker>
<LoadDataHeader />
<LoadDataTabbedPage activeTab="telegrafs">
<LoadDataTabbedPage activeTab="telegrafs" orgID={org.id}>
<GetResources
resources={[ResourceType.Buckets, ResourceType.Telegrafs]}
>
@ -79,4 +87,9 @@ class TelegrafsPage extends PureComponent {
}
}
export default TelegrafsPage
const mstp = (state: AppState) => {
const org = getOrg(state)
return {org}
}
export default connect<StateProps>(mstp)(TelegrafsPage)

View File

@ -1,58 +0,0 @@
// Libraries
import React, {FC, useContext} from 'react'
import {useParams} from 'react-router-dom'
// Contexts
import {WriteDataDetailsContext} from 'src/writeData/components/WriteDataDetailsContext'
// Components
import CodeSnippet from 'src/shared/components/CodeSnippet'
// Utils
import {event} from 'src/cloud/utils/reporting'
interface Props {
code: string
language: string
}
// NOTE: this is just a simplified form of the resig classic:
// https://johnresig.com/blog/javascript-micro-templating/
function transform(template, vars) {
const output = new Function(
'vars',
'var output=' +
JSON.stringify(template).replace(
/<%=(.+?)%>/g,
'"+(vars["$1".trim()])+"'
) +
';return output;'
)
return output(vars)
}
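// A minimal illustration of what transform() produces (hypothetical values,
// not taken from the original source): given a template containing
// <%= bucket %> and <%= org %> placeholders,
//
//   transform('influx write -b <%= bucket %> -o <%= org %>', {
//     bucket: 'telemetry',
//     org: 'example-org',
//   })
//
// returns 'influx write -b telemetry -o example-org'.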
const WriteDataCodeSnippet: FC<Props> = ({code, language}) => {
const {contentID} = useParams()
const {bucket, token, origin, organization} = useContext(
WriteDataDetailsContext
)
const vars = {
token: token.token,
bucket: bucket.name,
server: origin,
org: organization.name,
}
const copyText = transform(code, vars)
const sendCopyEvent = (): void => {
event('dataSources_copyCode', {dataSourceName: `${contentID}`})
}
return (
<CodeSnippet copyText={copyText} label={language} onClick={sendCopyEvent} />
)
}
export default WriteDataCodeSnippet

View File

@ -1,95 +0,0 @@
// Libraries
import React, {FC, ReactNode, createContext, useState} from 'react'
import {connect, ConnectedProps} from 'react-redux'
// Utils
import {getAll} from 'src/resources/selectors'
import {getOrg} from 'src/organizations/selectors'
// Types
import {
AppState,
ResourceType,
Bucket,
Authorization,
Organization,
} from 'src/types'
interface ComponentProps {
children: ReactNode
}
type ReduxProps = ConnectedProps<typeof connector>
type Props = ComponentProps & ReduxProps
export interface WriteDataDetailsContextType {
organization: Organization
origin: string
bucket: Bucket
buckets: Bucket[]
changeBucket: (bucket: Bucket) => void
token: Authorization
tokens: Authorization[]
changeToken: (token: Authorization) => void
}
export const DEFAULT_WRITE_DATA_DETAILS_CONTEXT: WriteDataDetailsContextType = {
organization: null,
origin: '',
bucket: null,
buckets: [],
changeBucket: () => {},
token: null,
tokens: [],
changeToken: () => {},
}
export const WriteDataDetailsContext = createContext<
WriteDataDetailsContextType
>(DEFAULT_WRITE_DATA_DETAILS_CONTEXT)
const WriteDataDetailsContextProvider: FC<Props> = ({
organization,
buckets,
tokens,
children,
}) => {
const userBuckets = buckets.filter(b => b.type === 'user')
const [bucket, changeBucket] = useState<Bucket>(userBuckets[0])
const [token, changeToken] = useState<Authorization>(tokens[0])
const origin = window.location.origin
const value = {
organization,
origin,
bucket,
buckets: userBuckets,
changeBucket,
token,
tokens,
changeToken,
}
return (
<WriteDataDetailsContext.Provider value={value}>
{children}
</WriteDataDetailsContext.Provider>
)
}
const mstp = (state: AppState) => {
const buckets = getAll<Bucket>(state, ResourceType.Buckets)
const tokens = getAll<Authorization>(state, ResourceType.Authorizations)
const organization = getOrg(state)
return {
buckets,
tokens,
organization,
}
}
const connector = connect(mstp)
export default connector(WriteDataDetailsContextProvider)

View File

@ -1,102 +0,0 @@
@import '@influxdata/clockface/dist/variables.scss';
.write-data--details {
display: flex;
flex-direction: column;
align-items: center;
}
.write-data--details-thumbnail {
order: 1;
width: 200px;
margin-bottom: $cf-marg-c;
padding: $cf-marg-d;
background-color: $g3-castle;
border-radius: $cf-radius;
img {
width: 100%;
}
}
.write-data--details-content {
order: 2;
width: 100%;
font-size: 1.3em;
}
.write-data--details-widget {
width: 100%;
background-color: $g3-castle;
padding: $cf-marg-c;
margin-top: $cf-marg-c;
border-radius: $cf-radius;
}
.write-data--details-widget-title {
margin-bottom: $cf-marg-b;
padding-left: $cf-marg-b + $cf-marg-a;
padding-right: $cf-marg-b + $cf-marg-a;
}
.markdown-format .code-snippet--text {
background-color: $g1-raven;
}
@media screen and (min-width: $cf-grid--breakpoint-md) {
.write-data--details {
flex-direction: row;
align-items: flex-start;
}
.write-data--details-thumbnail {
flex: 1 0 200px;
width: auto;
margin-right: $cf-marg-e;
margin-bottom: 0;
}
.write-data--details-content {
width: auto;
flex: 8 0 0;
}
.write-data--details-sidebar {
flex: 1 0 200px;
width: auto;
margin-left: $cf-marg-c;
margin-top: 0;
}
}
// Helper Widget
.write-data-helper--heading {
display: flex;
align-items: center;
width: 100%;
opacity: 0.75;
transition: opacity 0.25s ease;
&:hover {
opacity: 1;
}
&:hover,
&:hover > * {
cursor: pointer;
}
}
.write-data-helper--heading__expanded {
opacity: 1;
}
.write-data-helper--caret {
transition: transform 0.25s ease;
transform: rotate(0deg);
margin-right: $cf-marg-b;
}
.write-data-helper--heading__expanded .write-data-helper--caret {
transform: rotate(90deg);
}

View File

@ -1,79 +0,0 @@
// Libraries
import React, {FC, ReactNode} from 'react'
import {useParams} from 'react-router-dom'
import ReactMarkdown, {Renderer} from 'react-markdown'
// Components
import {Page} from '@influxdata/clockface'
import WriteDataCodeSnippet from 'src/writeData/components/WriteDataCodeSnippet'
import WriteDataDetailsContextProvider from 'src/writeData/components/WriteDataDetailsContext'
import GetResources from 'src/resources/components/GetResources'
// Types
import {WriteDataSection} from 'src/writeData/constants'
import {ResourceType} from 'src/types'
// Graphics
import placeholderLogo from 'src/writeData/graphics/placeholderLogo.svg'
// Styles
import 'src/writeData/components/WriteDataDetailsView.scss'
interface Props {
section: WriteDataSection
children?: ReactNode
}
const codeRenderer: Renderer<HTMLPreElement> = (props: any): any => {
return <WriteDataCodeSnippet code={props.value} language={props.language} />
}
const WriteDataDetailsView: FC<Props> = ({section, children}) => {
const {contentID} = useParams()
const {name, markdown, image} = section.items.find(
item => item.id === contentID
)
let thumbnail = (
<img data-testid="load-data-details-thumb" src={image || placeholderLogo} />
)
let pageContent = <></>
if (image) {
thumbnail = <img data-testid="load-data-details-thumb" src={image} />
}
if (markdown) {
pageContent = (
<ReactMarkdown source={markdown} renderers={{code: codeRenderer}} />
)
}
return (
<GetResources
resources={[ResourceType.Authorizations, ResourceType.Buckets]}
>
<WriteDataDetailsContextProvider>
<Page>
<Page.Header fullWidth={false}>
<Page.Title title={name} />
</Page.Header>
<Page.Contents fullWidth={false} scrollable={true}>
<div className="write-data--details">
<div className="write-data--details-thumbnail">{thumbnail}</div>
<div
className="write-data--details-content markdown-format"
data-testid="load-data-details-content"
>
{children}
{pageContent}
</div>
</div>
</Page.Contents>
</Page>
</WriteDataDetailsContextProvider>
</GetResources>
)
}
export default WriteDataDetailsView

View File

@ -1,73 +0,0 @@
// Libraries
import React, {FC, useState} from 'react'
// Components
import {
Panel,
InfluxColors,
Heading,
HeadingElement,
FontWeight,
Grid,
Columns,
Icon,
IconFont,
ComponentSize,
} from '@influxdata/clockface'
import WriteDataHelperTokens from 'src/writeData/components/WriteDataHelperTokens'
import WriteDataHelperBuckets from 'src/writeData/components/WriteDataHelperBuckets'
const WriteDataHelper: FC<{}> = () => {
const [mode, changeMode] = useState<'expanded' | 'collapsed'>('expanded')
const handleToggleClick = (): void => {
if (mode === 'expanded') {
changeMode('collapsed')
} else {
changeMode('expanded')
}
}
return (
<Panel backgroundColor={InfluxColors.Castle}>
<Panel.Header size={ComponentSize.ExtraSmall}>
<div
className={`write-data-helper--heading write-data-helper--heading__${mode}`}
onClick={handleToggleClick}
>
<Icon
glyph={IconFont.CaretRight}
className="write-data-helper--caret"
/>
<Heading
element={HeadingElement.H5}
weight={FontWeight.Regular}
selectable={true}
>
Code Sample Options
</Heading>
</div>
</Panel.Header>
{mode === 'expanded' && (
<Panel.Body size={ComponentSize.ExtraSmall}>
<p>
Control how code samples in the documentation are populated with
system resources. Not all code samples make use of system resources.
</p>
<Grid>
<Grid.Row>
<Grid.Column widthSM={Columns.Six}>
<WriteDataHelperTokens />
</Grid.Column>
<Grid.Column widthSM={Columns.Six}>
<WriteDataHelperBuckets />
</Grid.Column>
</Grid.Row>
</Grid>
</Panel.Body>
)}
</Panel>
)
}
export default WriteDataHelper

View File

@ -1,51 +0,0 @@
// Libraries
import React, {FC, useContext} from 'react'
// Contexts
import {WriteDataDetailsContext} from 'src/writeData/components/WriteDataDetailsContext'
// Components
import {
List,
ComponentSize,
Heading,
HeadingElement,
Gradients,
InfluxColors,
} from '@influxdata/clockface'
const WriteDataHelperBuckets: FC = () => {
const {bucket, buckets, changeBucket} = useContext(WriteDataDetailsContext)
return (
<>
<Heading
element={HeadingElement.H6}
className="write-data--details-widget-title"
>
Bucket
</Heading>
<List
backgroundColor={InfluxColors.Obsidian}
style={{height: '200px'}}
maxHeight="200px"
>
{buckets.map(b => (
<List.Item
size={ComponentSize.Small}
key={b.id}
selected={b.id === bucket.id}
value={b}
onClick={changeBucket}
wrapText={true}
gradient={Gradients.GundamPilot}
>
{b.name}
</List.Item>
))}
</List>
</>
)
}
export default WriteDataHelperBuckets

View File

@ -1,51 +0,0 @@
// Libraries
import React, {FC, useContext} from 'react'
// Contexts
import {WriteDataDetailsContext} from 'src/writeData/components/WriteDataDetailsContext'
// Components
import {
List,
ComponentSize,
Heading,
HeadingElement,
Gradients,
InfluxColors,
} from '@influxdata/clockface'
const WriteDataHelperTokens: FC = () => {
const {token, tokens, changeToken} = useContext(WriteDataDetailsContext)
return (
<>
<Heading
element={HeadingElement.H6}
className="write-data--details-widget-title"
>
Token
</Heading>
<List
backgroundColor={InfluxColors.Obsidian}
style={{height: '200px'}}
maxHeight="200px"
>
{tokens.map(t => (
<List.Item
size={ComponentSize.Small}
key={t.id}
selected={t.id === token.id}
value={t}
onClick={changeToken}
wrapText={true}
gradient={Gradients.GundamPilot}
>
{t.description}
</List.Item>
))}
</List>
</>
)
}
export default WriteDataHelperTokens

View File

@ -1,46 +0,0 @@
// Libraries
import React, {FC} from 'react'
// Components
import {Page, SquareGrid, ComponentSize} from '@influxdata/clockface'
import WriteDataItem from 'src/writeData/components/WriteDataItem'
// Constants
import {WriteDataSection} from 'src/writeData/constants'
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
interface Props {
content: WriteDataSection
}
const WriteDataIndexView: FC<Props> = ({children, content}) => {
const {items, name} = content
return (
<>
<Page titleTag={pageTitleSuffixer([name, 'Load Data'])}>
<Page.Header fullWidth={false}>
<Page.Title title={name} />
</Page.Header>
<Page.Contents fullWidth={false} scrollable={true}>
<SquareGrid cardSize="200px" gutter={ComponentSize.Small}>
{items.map(item => (
<WriteDataItem
key={item.id}
id={item.id}
name={item.name}
url={item.url}
image={item.image}
/>
))}
</SquareGrid>
</Page.Contents>
</Page>
{children}
</>
)
}
export default WriteDataIndexView

View File

@ -1,20 +0,0 @@
@import '@influxdata/clockface/dist/variables.scss';
.write-data--item.cf-selectable-card.cf-selectable-card__xs
.cf-selectable-card--label {
font-size: 14px;
line-height: 14px;
padding-bottom: 11px;
}
.write-data--item-thumb {
width: 90%;
height: 90%;
background-size: contain;
background-repeat: no-repeat;
background-position: center center;
}
.write-data--item.cf-selectable-card .cf-selectable-card--children {
position: relative;
}

View File

@ -1,70 +0,0 @@
// Libraries
import React, {FC} from 'react'
import {connect} from 'react-redux'
import {withRouter, RouteComponentProps} from 'react-router-dom'
// Components
import {SelectableCard, SquareGrid, ComponentSize} from '@influxdata/clockface'
// Utils
import {getOrg} from 'src/organizations/selectors'
// Graphics
import placeholderLogo from 'src/writeData/graphics/placeholderLogo.svg'
// Types
import {WriteDataItem} from 'src/writeData/constants'
import {AppState} from 'src/types'
// Constants
import {ORGS} from 'src/shared/constants/routes'
// Styles
import 'src/writeData/components/WriteDataItem.scss'
interface StateProps {
orgID: string
}
type Props = WriteDataItem & RouteComponentProps & StateProps
const WriteDataItem: FC<Props> = ({id, name, url, image, history, orgID}) => {
const handleClick = (): void => {
history.push(`/${ORGS}/${orgID}/load-data/${url}`)
}
let thumbnailStyle = {backgroundImage: `url(${placeholderLogo})`}
if (image) {
// TODO: Won't need this once images are imported correctly
const filePathIsCorrect = !image.replace(/[/]([\w\d])\w+[.]svg/, '').length
if (filePathIsCorrect) {
thumbnailStyle = {backgroundImage: `url(${image})`}
}
}
return (
<SquareGrid.Card key={id}>
<SelectableCard
id={id}
formName="load-data-cards"
label={name}
testID={`load-data-item ${id}`}
selected={false}
onClick={handleClick}
fontSize={ComponentSize.ExtraSmall}
className="write-data--item"
>
<div className="write-data--item-thumb" style={thumbnailStyle} />
</SelectableCard>
</SquareGrid.Card>
)
}
const mstp = (state: AppState) => {
const {id} = getOrg(state)
return {orgID: id}
}
export default connect<StateProps>(mstp)(withRouter(WriteDataItem))

View File

@ -1,28 +0,0 @@
// Libraries
import React, {FC, ChangeEvent, useContext} from 'react'
// Contexts
import {WriteDataSearchContext} from 'src/writeData/containers/WriteDataPage'
// Components
import {Input, InputRef, ComponentSize, IconFont} from '@influxdata/clockface'
const WriteDataSearchBar: FC = () => {
const {searchTerm, setSearchTerm} = useContext(WriteDataSearchContext)
const handleInputChange = (e: ChangeEvent<InputRef>): void => {
setSearchTerm(e.target.value)
}
return (
<Input
placeholder="Search data writing methods..."
value={searchTerm}
size={ComponentSize.Large}
icon={IconFont.Search}
onChange={handleInputChange}
autoFocus={true}
/>
)
}
export default WriteDataSearchBar

View File

@ -1,73 +0,0 @@
// Libraries
import React, {FC, useContext} from 'react'
// Contexts
import {WriteDataSearchContext} from 'src/writeData/containers/WriteDataPage'
// Components
import {
SquareGrid,
ComponentSize,
Heading,
HeadingElement,
FontWeight,
} from '@influxdata/clockface'
import WriteDataItem from 'src/writeData/components/WriteDataItem'
// Constants
import {doesItemMatchSearchTerm} from 'src/writeData/constants'
// Types
import {WriteDataSection} from 'src/writeData/constants'
const WriteDataSection: FC<Omit<WriteDataSection, 'featureFlag'>> = ({
id,
name,
description,
items,
}) => {
const {searchTerm} = useContext(WriteDataSearchContext)
const filteredItems = items.filter(item =>
doesItemMatchSearchTerm(item.name, searchTerm)
)
const sortedItems = filteredItems.sort((a, b) =>
a.name.toLowerCase().localeCompare(b.name.toLowerCase())
)
return (
<div
className="write-data--section"
data-testid={`write-data--section ${id}`}
>
<Heading
element={HeadingElement.H2}
weight={FontWeight.Regular}
style={{marginTop: '24px', marginBottom: '4px'}}
>
{name}
</Heading>
<Heading
element={HeadingElement.H5}
weight={FontWeight.Regular}
style={{marginBottom: '12px'}}
>
{description}
</Heading>
<SquareGrid cardSize="170px" gutter={ComponentSize.Small}>
{sortedItems.map(item => (
<WriteDataItem
key={item.id}
id={item.id}
name={item.name}
image={item.image}
url={item.url}
/>
))}
</SquareGrid>
</div>
)
}
export default WriteDataSection

View File

@ -1,55 +0,0 @@
// Libraries
import React, {FC, useContext} from 'react'
// Contexts
import {WriteDataSearchContext} from 'src/writeData/containers/WriteDataPage'
// Constants
import {
WRITE_DATA_SECTIONS,
sectionContainsMatchingItems,
} from 'src/writeData/constants'
// Utils
import {isFlagEnabled} from 'src/shared/utils/featureFlag'
// Components
import {EmptyState, ComponentSize} from '@influxdata/clockface'
import WriteDataSection from 'src/writeData/components/WriteDataSection'
const WriteDataSections: FC = () => {
const {searchTerm} = useContext(WriteDataSearchContext)
const filteredSections = WRITE_DATA_SECTIONS.filter(section => {
const containsMatches = sectionContainsMatchingItems(section, searchTerm)
const featureFlagEnabled = isFlagEnabled(section.featureFlag)
return containsMatches && featureFlagEnabled
})
if (!filteredSections.length) {
return (
<EmptyState size={ComponentSize.Large}>
<h4>
Nothing matched <strong>{`"${searchTerm}"`}</strong>
</h4>
</EmptyState>
)
}
return (
<>
{filteredSections.map(section => (
<WriteDataSection
key={section.id}
id={section.id}
name={section.name}
description={section.description}
items={section.items}
/>
))}
</>
)
}
export default WriteDataSections

View File

@ -1,186 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino)
##### Install Library
Library Manager
```
1. Open the Arduino IDE, click the "Sketch" menu, and then choose Include Library > Manage Libraries
2. Type 'influxdb' in the search box
3. Install the 'InfluxDBClient for Arduino' library
```
Manual Installation
```
1. cd <arduino-sketch-location>/libraries
2. git clone https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino
3. Restart the Arduino IDE
```
##### Initialize the Client
```
#if defined(ESP32)
#include <WiFiMulti.h>
WiFiMulti wifiMulti;
#define DEVICE "ESP32"
#elif defined(ESP8266)
#include <ESP8266WiFiMulti.h>
ESP8266WiFiMulti wifiMulti;
#define DEVICE "ESP8266"
#endif
#include <InfluxDbClient.h>
#include <InfluxDbCloud.h>
// WiFi AP SSID
#define WIFI_SSID "SSID"
// WiFi password
#define WIFI_PASSWORD "PASSWORD"
// InfluxDB v2 server url, e.g. https://eu-central-1-1.aws.cloud2.influxdata.com (Use: InfluxDB UI -> Load Data -> Client Libraries)
#define INFLUXDB_URL "<%= server %>"
// InfluxDB v2 server or cloud API authentication token (Use: InfluxDB UI -> Data -> Tokens -> <select token>)
#define INFLUXDB_TOKEN "<%= token %>"
// InfluxDB v2 organization id (Use: InfluxDB UI -> User -> About -> Common Ids )
#define INFLUXDB_ORG "<%= org %>"
// InfluxDB v2 bucket name (Use: InfluxDB UI -> Data -> Buckets)
#define INFLUXDB_BUCKET "<%= bucket %>"
// Set timezone string according to https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
// Examples:
// Pacific Time: "PST8PDT"
// Eastern: "EST5EDT"
//  Japanese: "JST-9"
// Central Europe: "CET-1CEST,M3.5.0,M10.5.0/3"
#define TZ_INFO "CET-1CEST,M3.5.0,M10.5.0/3"
// InfluxDB client instance with preconfigured InfluxCloud certificate
InfluxDBClient client(INFLUXDB_URL, INFLUXDB_ORG, INFLUXDB_BUCKET, INFLUXDB_TOKEN, InfluxDbCloud2CACert);
// Data point
Point sensor("wifi_status");
void setup() {
Serial.begin(115200);
// Setup wifi
WiFi.mode(WIFI_STA);
wifiMulti.addAP(WIFI_SSID, WIFI_PASSWORD);
Serial.print("Connecting to wifi");
while (wifiMulti.run() != WL_CONNECTED) {
Serial.print(".");
delay(100);
}
Serial.println();
// Add tags
sensor.addTag("device", DEVICE);
sensor.addTag("SSID", WiFi.SSID());
// Accurate time is necessary for certificate validation and writing in batches
// For the fastest time sync find NTP servers in your area: https://www.pool.ntp.org/zone/
// Syncing progress and the time will be printed to Serial.
timeSync(TZ_INFO, "pool.ntp.org", "time.nist.gov");
// Check server connection
if (client.validateConnection()) {
Serial.print("Connected to InfluxDB: ");
Serial.println(client.getServerUrl());
} else {
Serial.print("InfluxDB connection failed: ");
Serial.println(client.getLastErrorMessage());
}
}
```
##### Write Data
```
void loop() {
// Clear fields for reusing the point. Tags will remain untouched
sensor.clearFields();
// Store measured value into point
// Report RSSI of currently connected network
sensor.addField("rssi", WiFi.RSSI());
// Print exactly what we are writing
Serial.print("Writing: ");
Serial.println(sensor.toLineProtocol());
// If no Wifi signal, try to reconnect it
if ((WiFi.RSSI() == 0) && (wifiMulti.run() != WL_CONNECTED)) {
Serial.println("Wifi connection lost");
}
// Write point
if (!client.writePoint(sensor)) {
Serial.print("InfluxDB write failed: ");
Serial.println(client.getLastErrorMessage());
}
//Wait 10s
Serial.println("Wait 10s");
delay(10000);
}
```
##### Execute a Flux query
```
void loop() {
// Construct a Flux query
// Query will find the worst RSSI for last hour for each connected WiFi network with this device
String query = "from(bucket: \\"" INFLUXDB_BUCKET "\\") |> range(start: -1h) |> filter(fn: (r) => r._measurement == \\"wifi_status\\" and r._field == \\"rssi\\"";
query += " and r.device == \\"" DEVICE "\\")";
query += "|> min()";
// Print output header
Serial.println("==== Worst RSSI per SSID (last hour) ====");
// Print composed query
Serial.print("Querying with: ");
Serial.println(query);
// Send query to the server and get result
FluxQueryResult result = client.query(query);
// Iterate over rows. Even if there is just one row, next() must be called at least once.
while (result.next()) {
// Get converted value for flux result column 'SSID'
String ssid = result.getValueByName("SSID").getString();
Serial.print("SSID '");
Serial.print(ssid);
Serial.print("' with RSSI ");
// Get converted value for flux result column '_value' where the RSSI value is stored
long value = result.getValueByName("_value").getLong();
Serial.print(value);
// Get converted value for the _time column
FluxDateTime time = result.getValueByName("_time").getDateTime();
// Format date-time for printing
// Format string according to http://www.cplusplus.com/reference/ctime/strftime/
String timeStr = time.format("%F %T");
Serial.print(" at ");
Serial.print(timeStr);
Serial.println();
}
// Check if there was an error
if(result.getError() != "") {
Serial.print("Query result error: ");
Serial.println(result.getError());
}
// Close the result
result.close();
}
```

View File

@ -1,104 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-csharp)
##### Install Package
Package Manager
```
Install-Package InfluxDB.Client
```
.NET CLI
```
dotnet add package InfluxDB.Client
```
Package Reference
```
<PackageReference Include="InfluxDB.Client" />
```
##### Initialize the Client
```
using System;
using System.Threading.Tasks;
using InfluxDB.Client;
using InfluxDB.Client.Api.Domain;
using InfluxDB.Client.Core;
using InfluxDB.Client.Writes;
namespace Examples
{
public class Examples
{
public static async Task Main(string[] args)
{
// You can generate a Token from the "Tokens Tab" in the UI
const string token = "<%= token %>";
const string bucket = "<%= bucket %>";
const string org = "<%= org %>";
var client = InfluxDBClientFactory.Create("<%= server %>", token.ToCharArray());
}
}
}
```
##### Write Data
Option 1: Use InfluxDB Line Protocol to write data
```
const string data = "mem,host=host1 used_percent=23.43234543";
using (var writeApi = client.GetWriteApi())
{
writeApi.WriteRecord(bucket, org, WritePrecision.Ns, data);
}
```
Option 2: Use a Data Point to write data
```
var point = PointData
.Measurement("mem")
.Tag("host", "host1")
.Field("used_percent", 23.43234543)
.Timestamp(DateTime.UtcNow, WritePrecision.Ns);
using (var writeApi = client.GetWriteApi())
{
writeApi.WritePoint(bucket, org, point);
}
```
Option 3: Use POCO and corresponding Class to write data
```
var mem = new Mem { Host = "host1", UsedPercent = 23.43234543, Time = DateTime.UtcNow };
using (var writeApi = client.GetWriteApi())
{
writeApi.WriteMeasurement(bucket, org, WritePrecision.Ns, mem);
}
```
```
// Public class
[Measurement("mem")]
private class Mem
{
[Column("host", IsTag = true)] public string Host { get; set; }
[Column("used_percent")] public double? UsedPercent { get; set; }
[Column(IsTimestamp = true)] public DateTime Time { get; set; }
}
```
##### Execute a Flux query
```
var query = $"from(bucket: \\"{bucket}\\") |> range(start: -1h)";
var tables = await client.GetQueryApi().QueryAsync(query, org);
```

View File

@ -1,19 +0,0 @@
// Libraries
import React, {FC} from 'react'
// Components
import WriteDataIndexView from 'src/writeData/components/WriteDataIndexView'
// Constants
import WRITE_DATA_CLIENT_LIBRARIES_SECTION from 'src/writeData/constants/contentClientLibraries'
const ClientLibrariesIndex: FC = ({children}) => {
return (
<>
<WriteDataIndexView content={WRITE_DATA_CLIENT_LIBRARIES_SECTION} />
{children}
</>
)
}
export default ClientLibrariesIndex

View File

@ -1,89 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-go)
##### Initialize the Client
```
package main
import (
"context"
"fmt"
"github.com/influxdata/influxdb-client-go"
"time"
)
func main() {
// You can generate a Token from the "Tokens Tab" in the UI
const token = "<%= token %>"
const bucket = "<%= bucket %>"
const org = "<%= org %>"
client := influxdb2.NewClient("<%= server %>", token)
// always close client at the end
defer client.Close()
}
```
##### Write Data
Option 1: Use InfluxDB Line Protocol to write data
```
// get non-blocking write client
writeApi := client.WriteApi(org, bucket)
// write line protocol
writeApi.WriteRecord(fmt.Sprintf("stat,unit=temperature avg=%f,max=%f", 23.5, 45.0))
writeApi.WriteRecord(fmt.Sprintf("stat,unit=temperature avg=%f,max=%f", 22.5, 45.0))
// Flush writes
writeApi.Flush()
```
Option 2: Use a Data Point to write data
```
// create point using full params constructor
p := influxdb2.NewPoint("stat",
map[string]string{"unit": "temperature"},
map[string]interface{}{"avg": 24.5, "max": 45},
time.Now())
// write point asynchronously
writeApi.WritePoint(p)
// create point using fluent style
p = influxdb2.NewPointWithMeasurement("stat").
AddTag("unit", "temperature").
AddField("avg", 23.2).
AddField("max", 45).
SetTime(time.Now())
// write point asynchronously
writeApi.WritePoint(p)
// Flush writes
writeApi.Flush()
```
##### Execute a Flux query
```
query := fmt.Sprintf("from(bucket:\\"%v\\")|> range(start: -1h) |> filter(fn: (r) => r._measurement == \\"stat\\")", bucket)
// Get query client
queryApi := client.QueryApi(org)
// get QueryTableResult
result, err := queryApi.Query(context.Background(), query)
if err == nil {
// Iterate over query response
for result.Next() {
// Notice when group key has changed
if result.TableChanged() {
fmt.Printf("table: %s\\n", result.TableMetadata().String())
}
// Access data
fmt.Printf("value: %v\\n", result.Record().Value())
}
// check for an error
if result.Err() != nil {
fmt.Printf("query parsing error: %\\n", result.Err().Error())
}
} else {
panic(err)
}
```

View File

@ -1,108 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-java)
##### Add Dependency
Build with Maven
```
<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-java</artifactId>
<version>1.8.0</version>
</dependency>
```
Build with Gradle
```
dependencies {
compile "com.influxdb:influxdb-client-java:1.8.0"
}
```
##### Initialize the Client
```
package example;
import java.time.Instant;
import java.util.List;
import com.influxdb.annotations.Column;
import com.influxdb.annotations.Measurement;
import com.influxdb.client.InfluxDBClient;
import com.influxdb.client.InfluxDBClientFactory;
import com.influxdb.client.WriteApi;
import com.influxdb.client.domain.WritePrecision;
import com.influxdb.client.write.Point;
import com.influxdb.query.FluxTable;
public class InfluxDB2Example {
public static void main(final String[] args) {
// You can generate a Token from the "Tokens Tab" in the UI
String token = "<%= token %>";
String bucket = "<%= bucket %>";
String org = "<%= org %>";
InfluxDBClient client = InfluxDBClientFactory.create("<%= server %>", token.toCharArray());
}
}
```
##### Write Data
Option 1: Use InfluxDB Line Protocol to write data
```
String data = "mem,host=host1 used_percent=23.43234543";
try (WriteApi writeApi = client.getWriteApi()) {
writeApi.writeRecord(bucket, org, WritePrecision.NS, data);
}
```
Option 2: Use a Data Point to write data
```
Point point = Point
.measurement("mem")
.addTag("host", "host1")
.addField("used_percent", 23.43234543)
.time(Instant.now(), WritePrecision.NS);
try (WriteApi writeApi = client.getWriteApi()) {
writeApi.writePoint(bucket, org, point);
}
```
Option 3: Use POJO and corresponding class to write data
```
Mem mem = new Mem();
mem.host = "host1";
mem.used_percent = 23.43234543;
mem.time = Instant.now();
try (WriteApi writeApi = client.getWriteApi()) {
writeApi.writeMeasurement(bucket, org, WritePrecision.NS, mem);
}
```
```
@Measurement(name = "mem")
public static class Mem {
@Column(tag = true)
String host;
@Column
Double used_percent;
@Column(timestamp = true)
Instant time;
}
```
##### Execute a Flux query
```
String query = String.format("from(bucket: \\"%s\\") |> range(start: -1h)", bucket);
List<FluxTable> tables = client.getQueryApi().query(query, org);
```

View File

@ -1,65 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-java/tree/master/client-kotlin)
##### Add Dependency
Build with Maven
```
<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-kotlin</artifactId>
<version>1.8.0</version>
</dependency>
```
Build with Gradle
```
dependencies {
compile "com.influxdb:influxdb-client-kotlin:1.8.0"
}
```
##### Initialize the Client
```
package example
import com.influxdb.client.kotlin.InfluxDBClientKotlinFactory
import kotlinx.coroutines.channels.consumeEach
import kotlinx.coroutines.channels.filter
import kotlinx.coroutines.channels.take
import kotlinx.coroutines.runBlocking
fun main() = runBlocking {
// You can generate a Token from the "Tokens Tab" in the UI
val token = "<%= token %>"
val org = "<%= org %>"
val bucket = "<%= bucket %>"
val client = InfluxDBClientKotlinFactory.create("<%= server %>", token.toCharArray(), org)
}
```
##### Execute a Flux query
```
val query = ("from(bucket: \\"$bucket\\")"
+ " |> range(start: -1d)"
+ " |> filter(fn: (r) => (r[\\"_measurement\\"] == \\"cpu\\" and r[\\"_field\\"] == \\"usage_system\\"))")
// Result is returned as a stream
val results = client.getQueryKotlinApi().query(query)
// Example of additional result stream processing on client side
results
// filter on client side using \`filter\` built-in operator
.filter { "cpu0" == it.getValueByKey("cpu") }
// take first 20 records
.take(20)
// print results
.consumeEach { println("Measurement: $\{it.measurement}, value: $\{it.value}") }
client.close()
```

View File

@ -1,64 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-js)
##### Install via NPM
```
npm i @influxdata/influxdb-client
```
##### Initialize the Client
```
const {InfluxDB} = require('@influxdata/influxdb-client')
// You can generate a Token from the "Tokens Tab" in the UI
const token = '<%= token %>'
const org = '<%= org %>'
const bucket = '<%= bucket %>'
const client = new InfluxDB({url: '<%= server %>', token: token})
```
##### Write Data
```
const {Point} = require('@influxdata/influxdb-client')
const writeApi = client.getWriteApi(org, bucket)
writeApi.useDefaultTags({host: 'host1'})
const point = new Point('mem')
.floatField('used_percent', 23.43234543)
writeApi.writePoint(point)
writeApi
.close()
.then(() => {
console.log('FINISHED')
})
.catch(e => {
console.error(e)
console.log('\\nFinished ERROR')
})
```
##### Execute a Flux query
```
const queryApi = client.getQueryApi(org)
const query = \`from(bucket: \"\${bucket}\") |> range(start: -1h)\`
queryApi.queryRows(query, {
next(row, tableMeta) {
const o = tableMeta.toObject(row)
console.log(
\`\${o._time} \${o._measurement} in \'\${o.location}\' (\${o.example}): \${o._field}=\${o._value}\`
)
},
error(error) {
console.error(error)
console.log('\\nFinished ERROR')
},
complete() {
console.log('\\nFinished SUCCESS')
},
})
```

View File

@ -1,66 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-php)
##### Install via Composer
```
composer require influxdata/influxdb-client-php
```
##### Initialize the Client
```
use InfluxDB2\\Client;
use InfluxDB2\\Model\\WritePrecision;
use InfluxDB2\\Point;
# You can generate a Token from the "Tokens Tab" in the UI
$token = '<%= token %>';
$org = '<%= org %>';
$bucket = '<%= bucket %>';
$client = new Client([
"url" => "<%= server %>",
"token" => $token,
]);
```
##### Write Data
Option 1: Use InfluxDB Line Protocol to write data
```
$writeApi = $client->createWriteApi();
$data = "mem,host=host1 used_percent=23.43234543";
$writeApi->write($data, WritePrecision::S, $bucket, $org);
```
Option 2: Use a Data Point to write data
```
$point = Point::measurement('mem')
->addTag('host', 'host1')
->addField('used_percent', 23.43234543)
->time(microtime(true));
$writeApi->write($point, WritePrecision::S, $bucket, $org);
```
Option 3: Use an Array structure to write data
```
$dataArray = ['name' => 'cpu',
'tags' => ['host' => 'server_nl', 'region' => 'us'],
'fields' => ['internal' => 5, 'external' => 6],
'time' => microtime(true)];
$writeApi->write($dataArray, WritePrecision::S, $bucket, $org);
```
##### Execute a Flux query
```
$query = "from(bucket: \\"{$bucket}\\") |> range(start: -1h)";
$tables = $client->createQueryApi()->query($query, $org);
```

View File

@ -1,60 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-python)
##### Install Package
```
pip install influxdb-client
```
##### Initialize the Client
```
from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
# You can generate a Token from the "Tokens Tab" in the UI
token = "<%= token %>"
org = "<%= org %>"
bucket = "<%= bucket %>"
client = InfluxDBClient(url="<%= server %>", token=token)
```
##### Write Data
Option 1: Use InfluxDB Line Protocol to write data
```
write_api = client.write_api(write_options=SYNCHRONOUS)
data = "mem,host=host1 used_percent=23.43234543"
write_api.write(bucket, org, data)
```
Option 2: Use a Data Point to write data
```
point = Point("mem")\\
.tag("host", "host1")\\
.field("used_percent", 23.43234543)\\
.time(datetime.utcnow(), WritePrecision.NS)
write_api.write(bucket, org, point)
```
Option 3: Use a Batch Sequence to write data
```
sequence = ["mem,host=host1 used_percent=23.43234543",
"mem,host=host1 available_percent=15.856523"]
write_api.write(bucket, org, sequence)
```
##### Execute a Flux query
```
query = f'from(bucket: \\"{bucket}\\") |> range(start: -1h)'
tables = client.query_api().query(query, org=org)
```

View File

@ -1,79 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-ruby)
##### Install the Gem
```
gem install influxdb-client
```
##### Initialize the Client
```
require 'influxdb-client'
# You can generate a Token from the "Tokens Tab" in the UI
token = '<%= token %>'
org = '<%= org %>'
bucket = '<%= bucket %>'
client = InfluxDB2::Client.new('<%= server %>', token,
precision: InfluxDB2::WritePrecision::NANOSECOND)
```
##### Write Data
Option 1: Use InfluxDB Line Protocol to write data
```
write_api = client.create_write_api
data = 'mem,host=host1 used_percent=23.43234543'
write_api.write(data: data, bucket: bucket, org: org)
```
Option 2: Use a Data Point to write data
```
point = InfluxDB2::Point.new(name: 'mem')
.add_tag('host', 'host1')
.add_field('used_percent', 23.43234543)
.time(Time.now.utc, InfluxDB2::WritePrecision::NANOSECOND)
write_api.write(data: point, bucket: bucket, org: org)
```
Option 3: Use a Hash to write data
```
hash = {name: 'h2o',
tags: {host: 'aws', region: 'us'},
fields: {level: 5, saturation: '99%'},
time: Time.now.utc}
write_api.write(data: hash, bucket: bucket, org: org)
```
Option 4: Use a Batch Sequence to write data
```
point = InfluxDB2::Point.new(name: 'mem')
.add_tag('host', 'host1')
.add_field('used_percent', 23.43234543)
.time(Time.now.utc, InfluxDB2::WritePrecision::NANOSECOND)
hash = {name: 'h2o',
tags: {host: 'aws', region: 'us'},
fields: {level: 5, saturation: '99%'},
time: Time.now.utc}
data = 'mem,host=host1 used_percent=23.23234543'
write_api.write(data: [point, hash, data], bucket: bucket, org: org)
```
##### Execute a Flux query
```
query = "from(bucket: \\"#{bucket}\\") |> range(start: -1h)"
tables = client.create_query_api.query(query: query, org: org)
```

View File

@ -1,83 +0,0 @@
For more detailed and up-to-date information, check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-java/tree/master/client-scala)
##### Add Dependency
Build with sbt
```
libraryDependencies += "com.influxdb" % "influxdb-client-scala" % "1.8.0"
```
Build with Maven
```
<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-scala</artifactId>
<version>1.8.0</version>
</dependency>
```
Build with Gradle
```
dependencies {
compile "com.influxdb:influxdb-client-scala:1.8.0"
}
```
##### Initialize the Client
```
package example
import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import com.influxdb.client.scala.InfluxDBClientScalaFactory
import com.influxdb.query.FluxRecord
import scala.concurrent.Await
import scala.concurrent.duration.Duration
object InfluxDB2ScalaExample {
implicit val system: ActorSystem = ActorSystem("it-tests")
def main(args: Array[String]): Unit = {
// You can generate a Token from the "Tokens Tab" in the UI
val token = "<%= token %>"
val org = "<%= org %>"
val bucket = "<%= bucket %>"
val client = InfluxDBClientScalaFactory.create("<%= server %>", token.toCharArray, org)
}
}
```
##### Execute a Flux query
```
val query = (s"""from(bucket: "$bucket")"""
+ " |> range(start: -1d)"
+ " |> filter(fn: (r) => (r[\\"_measurement\\"] == \\"cpu\\" and r[\\"_field\\"] == \\"usage_system\\"))")
// Result is returned as a stream
val results = client.getQueryScalaApi().query(query)
// Example of additional result stream processing on client side
val sink = results
// filter on client side using \`filter\` built-in operator
.filter(it => "cpu0" == it.getValueByKey("cpu"))
// take first 20 records
.take(20)
// print results
.runWith(Sink.foreach[FluxRecord](it => println(s"Measurement: $\{it.getMeasurement}, value: $\{it.getValue}")
))
// wait to finish
Await.result(sink, Duration.Inf)
client.close()
system.terminate()
```

View File

@ -1,43 +0,0 @@
// Libraries
import React, {FC} from 'react'
// Components
import {
Panel,
InfluxColors,
Heading,
FontWeight,
HeadingElement,
} from '@influxdata/clockface'
const TelegrafPluginsExplainer: FC = () => {
return (
<Panel backgroundColor={InfluxColors.Castle} style={{marginBottom: '8px'}}>
<Panel.Header>
<Heading element={HeadingElement.H4} weight={FontWeight.Regular}>
Getting Started with Telegraf
</Heading>
</Panel.Header>
<Panel.Body>
<p>
Telegraf is InfluxData’s data collection agent for collecting and
reporting metrics. Its vast library of input plugins and
plug-and-play architecture lets you quickly and easily collect
metrics from many different sources.
</p>
<p>
You will need to have Telegraf installed in order to use this plugin.
See our handy{' '}
<a
href="https://docs.influxdata.com/telegraf/v1.15/introduction/installation/"
target="_blank"
>
Installation Guide
</a>
</p>
</Panel.Body>
</Panel>
)
}
export default TelegrafPluginsExplainer

View File

@ -1,19 +0,0 @@
// Libraries
import React, {FC} from 'react'
// Components
import WriteDataIndexView from 'src/writeData/components/WriteDataIndexView'
// Constants
import WRITE_DATA_TELEGRAF_PLUGINS_SECTION from 'src/writeData/constants/contentTelegrafPlugins'
const TelegrafPluginsIndex: FC = ({children}) => {
return (
<>
<WriteDataIndexView content={WRITE_DATA_TELEGRAF_PLUGINS_SECTION} />
{children}
</>
)
}
export default TelegrafPluginsIndex

View File

@ -1,88 +0,0 @@
# ActiveMQ Input Plugin
This plugin gathers queue, topic, and subscriber metrics using the ActiveMQ Console API.
### Configuration:
```toml
# Description
[[inputs.activemq]]
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"
## Required ActiveMQ Endpoint
## deprecated in 1.11; use the url option
# server = "192.168.50.10"
# port = 8161
## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
## Required ActiveMQ webadmin root path
# webadmin = "admin"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
### Metrics
Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API.
- activemq_queues
- tags:
- name
- source
- port
- fields:
- size
- consumer_count
- enqueue_count
- dequeue_count
- activemq_topics
- tags:
- name
- source
- port
- fields:
- size
- consumer_count
- enqueue_count
- dequeue_count
- activemq_subscribers
- tags:
- client_id
- subscription_name
- connection_id
- destination_name
- selector
- active
- source
- port
- fields:
- pending_queue_size
- dispatched_queue_size
- dispatched_counter
- enqueue_counter
- dequeue_counter
### Example Output
```
activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000
activemq_topics,host=88284b2fe51b,name=AAA\,source=localhost,port=8161 size=0i,consumer_count=1i,enqueue_count=0i,dequeue_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.Topic\,source=localhost,port=8161 ,host=88284b2fe51b enqueue_count=1i,dequeue_count=0i,size=0i,consumer_count=0i 1492610703000000000
activemq_topics,name=ActiveMQ.Advisory.Queue\,source=localhost,port=8161 ,host=88284b2fe51b size=0i,consumer_count=0i,enqueue_count=2i,dequeue_count=0i 1492610703000000000
activemq_topics,name=AAAA\ ,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000
activemq_subscribers,connection_id=NOTSET,destination_name=AAA,,source=localhost,port=8161,selector=AA,active=no,host=88284b2fe51b,client_id=AAA,subscription_name=AAA pending_queue_size=0i,dispatched_queue_size=0i,dispatched_counter=0i,enqueue_counter=0i,dequeue_counter=0i 1492610703000000000
```

File diff suppressed because one or more lines are too long

View File

@ -1,93 +0,0 @@
# AMQP Consumer Input Plugin
This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
Metrics are read from a topic exchange using the configured queue and binding_key.
Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
For an introduction to AMQP see:
- https://www.rabbitmq.com/tutorials/amqp-concepts.html
- https://www.rabbitmq.com/getstarted.html
The following defaults are known to work with RabbitMQ:
```toml
[[inputs.amqp_consumer]]
## Broker to consume from.
## deprecated in 1.7; use the brokers option
# url = "amqp://localhost:5672/influxdb"
## Brokers to consume from. If multiple brokers are specified a random broker
## will be selected anytime a connection is established. This can be
## helpful for load balancing when not using a dedicated load balancer.
brokers = ["amqp://localhost:5672/influxdb"]
## Authentication credentials for the PLAIN auth_method.
# username = ""
# password = ""
## Name of the exchange to declare. If unset, no exchange will be declared.
exchange = "telegraf"
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# exchange_type = "topic"
## If true, exchange will be passively declared.
# exchange_passive = false
## Exchange durability can be either "transient" or "durable".
# exchange_durability = "durable"
## Additional exchange arguments.
# exchange_arguments = { }
# exchange_arguments = {"hash_property" = "timestamp"}
## AMQP queue name
queue = "telegraf"
## AMQP queue durability can be "transient" or "durable".
queue_durability = "durable"
## If true, queue will be passively declared.
# queue_passive = false
## A binding between the exchange and queue using this binding key is
## created. If unset, no binding is created.
binding_key = "#"
## Maximum number of messages server should give to the worker.
# prefetch_count = 50
## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Content encoding for message payloads; can be set to "gzip" or
## "identity" to apply no encoding.
# content_encoding = "identity"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@ -1,84 +0,0 @@
# Apache Input Plugin
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, see the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
### Configuration:
```toml
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
### Measurements & Fields:
- apache
- BusyWorkers (float)
- BytesPerReq (float)
- BytesPerSec (float)
- ConnsAsyncClosing (float)
- ConnsAsyncKeepAlive (float)
- ConnsAsyncWriting (float)
- ConnsTotal (float)
- CPUChildrenSystem (float)
- CPUChildrenUser (float)
- CPULoad (float)
- CPUSystem (float)
- CPUUser (float)
- IdleWorkers (float)
- Load1 (float)
- Load5 (float)
- Load15 (float)
- ParentServerConfigGeneration (float)
- ParentServerMPMGeneration (float)
- ReqPerSec (float)
- ServerUptimeSeconds (float)
- TotalAccesses (float)
- TotalkBytes (float)
- Uptime (float)
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:
- apache
- scboard_closing (float)
- scboard_dnslookup (float)
- scboard_finishing (float)
- scboard_idle_cleanup (float)
- scboard_keepalive (float)
- scboard_logging (float)
- scboard_open (float)
- scboard_reading (float)
- scboard_sending (float)
- scboard_starting (float)
- scboard_waiting (float)
### Tags:
- All measurements have the following tags:
- port
- server
### Example Output:
```
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
```

View File

@ -1,54 +0,0 @@
# APCUPSD Input Plugin
This plugin reads data from an apcupsd daemon over its NIS network protocol.
### Requirements
apcupsd should be installed and its daemon should be running.
### Configuration
```toml
[[inputs.apcupsd]]
# A list of running apcupsd servers to connect to.
# If not provided will default to tcp://127.0.0.1:3551
servers = ["tcp://127.0.0.1:3551"]
## Timeout for dialing server.
timeout = "5s"
```
### Metrics
- apcupsd
- tags:
- serial
- status (string representing the set status_flags)
- ups_name
- model
- fields:
- status_flags ([status-bits][])
- input_voltage
- load_percent
- battery_charge_percent
- time_left_ns
- output_voltage
- internal_temp
- battery_voltage
- input_frequency
- time_on_battery_ns
- battery_date
- nominal_input_voltage
- nominal_battery_voltage
- nominal_power
- firmware
### Example output
```
apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000
```
[status-bits]: http://www.apcupsd.org/manual/manual.html#status-bits

File diff suppressed because one or more lines are too long

View File

@ -1,35 +0,0 @@
# Azure Storage Queue Input Plugin
This plugin gathers sizes of Azure Storage Queues.
### Configuration:
```toml
# Description
[[inputs.azure_storage_queue]]
## Required Azure Storage Account name
account_name = "mystorageaccount"
## Required Azure Storage Account access key
account_key = "storageaccountaccesskey"
## Set to false to disable peeking age of oldest message (executes faster)
# peek_oldest_message_age = true
```
### Metrics
- azure_storage_queues
- tags:
- queue
- account
- fields:
- size (integer, count)
- oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue.
Requires `peek_oldest_message_age` to be configured to `true`.
### Example Output
```
azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000
azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000
```

View File

@ -1,89 +0,0 @@
# bcache Input Plugin
Gets bcache stats from the stats_total directory and the dirty_data file.
# Measurements
Meta:
- tags: `backing_dev=dev bcache_dev=dev`
Measurement names:
- dirty_data
- bypassed
- cache_bypass_hits
- cache_bypass_misses
- cache_hit_ratio
- cache_hits
- cache_miss_collisions
- cache_misses
- cache_readaheads
### Description
```
dirty_data
Amount of dirty data for this backing device in the cache. Continuously
updated unlike the cache set's version, but may be slightly off.
bypassed
Amount of IO (both reads and writes) that has bypassed the cache
cache_bypass_hits
cache_bypass_misses
Hits and misses for IO that is intended to skip the cache are still counted,
but broken out here.
cache_hits
cache_misses
cache_hit_ratio
Hits and misses are counted per individual IO as bcache sees them; a
partial hit is counted as a miss.
cache_miss_collisions
Counts instances where data was going to be inserted into the cache from a
cache miss, but raced with a write and data was already present (usually 0
since the synchronization for cache misses was rewritten)
cache_readaheads
Count of times readahead occurred.
```
# Example output
Using this configuration:
```toml
[bcache]
# Bcache sets path
# If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# By default, telegraf gathers stats for all bcache devices
# Setting devices will restrict the stats to the specified
# bcache devices.
# bcacheDevs = ["bcache0", ...]
```
When run with:
```
./telegraf --config telegraf.conf --input-filter bcache --test
```
It produces:
```
* Plugin: bcache, Collection 1
> [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194
> [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_hits value=146270986
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_misses value=0
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hit_ratio value=90
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hits value=511941651
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_miss_collisions value=157678
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_misses value=50647396
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_readaheads value=0
```

View File

@ -1,98 +0,0 @@
# Beanstalkd Input Plugin
The `beanstalkd` plugin collects server stats as well as tube stats (reported by `stats` and `stats-tube` commands respectively).
### Configuration:
```toml
[[inputs.beanstalkd]]
## Server to collect data from
server = "localhost:11300"
## List of tubes to gather stats about.
## If no tubes specified then data gathered for each tube on server reported by list-tubes command
tubes = ["notifications"]
```
### Metrics:
Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) for a detailed explanation of the `stats` and `stats-tube` command output.
`beanstalkd_overview`: statistical information about the system as a whole
- fields
- cmd_delete
- cmd_pause_tube
- current_jobs_buried
- current_jobs_delayed
- current_jobs_ready
- current_jobs_reserved
- current_jobs_urgent
- current_using
- current_waiting
- current_watching
- pause
- pause_time_left
- total_jobs
- tags
- name
- server (address taken from config)
`beanstalkd_tube`: statistical information about the specified tube
- fields
- binlog_current_index
- binlog_max_size
- binlog_oldest_index
- binlog_records_migrated
- binlog_records_written
- cmd_bury
- cmd_delete
- cmd_ignore
- cmd_kick
- cmd_list_tube_used
- cmd_list_tubes
- cmd_list_tubes_watched
- cmd_pause_tube
- cmd_peek
- cmd_peek_buried
- cmd_peek_delayed
- cmd_peek_ready
- cmd_put
- cmd_release
- cmd_reserve
- cmd_reserve_with_timeout
- cmd_stats
- cmd_stats_job
- cmd_stats_tube
- cmd_touch
- cmd_use
- cmd_watch
- current_connections
- current_jobs_buried
- current_jobs_delayed
- current_jobs_ready
- current_jobs_reserved
- current_jobs_urgent
- current_producers
- current_tubes
- current_waiting
- current_workers
- job_timeouts
- max_job_size
- pid
- rusage_stime
- rusage_utime
- total_connections
- total_jobs
- uptime
- tags
- hostname
- id
- server (address taken from config)
- version
### Example Output:
```
beanstalkd_overview,host=server.local,hostname=a2ab22ed12e0,id=232485800aa11b24,server=localhost:11300,version=1.10 cmd_stats_tube=29482i,current_jobs_delayed=0i,current_jobs_urgent=6i,cmd_kick=0i,cmd_stats=7378i,cmd_stats_job=0i,current_waiting=0i,max_job_size=65535i,pid=6i,cmd_bury=0i,cmd_reserve_with_timeout=0i,cmd_touch=0i,current_connections=1i,current_jobs_ready=6i,current_producers=0i,cmd_delete=0i,cmd_list_tubes=7369i,cmd_peek_ready=0i,cmd_put=6i,cmd_use=3i,cmd_watch=0i,current_jobs_reserved=0i,rusage_stime=6.07,cmd_list_tubes_watched=0i,cmd_pause_tube=0i,total_jobs=6i,binlog_records_migrated=0i,cmd_list_tube_used=0i,cmd_peek_delayed=0i,cmd_release=0i,current_jobs_buried=0i,job_timeouts=0i,binlog_current_index=0i,binlog_max_size=10485760i,total_connections=7378i,cmd_peek_buried=0i,cmd_reserve=0i,current_tubes=4i,binlog_records_written=0i,cmd_peek=0i,rusage_utime=1.13,uptime=7099i,binlog_oldest_index=0i,current_workers=0i,cmd_ignore=0i 1528801650000000000
beanstalkd_tube,host=server.local,name=notifications,server=localhost:11300 pause_time_left=0i,current_jobs_buried=0i,current_jobs_delayed=0i,current_jobs_reserved=0i,current_using=0i,current_waiting=0i,pause=0i,total_jobs=3i,cmd_delete=0i,cmd_pause_tube=0i,current_jobs_ready=3i,current_jobs_urgent=3i,current_watching=0i 1528801650000000000
```

View File

@ -1,118 +0,0 @@
# BIND 9 Nameserver Statistics Input Plugin
This plugin decodes the JSON or XML statistics provided by BIND 9 nameservers.
### XML Statistics Channel
Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are supported. Note that
for BIND 9.9 to support version 3 statistics, it must be built with the `--enable-newstats` compile
flag, and it must be specifically requested via the correct URL. Version 3 statistics are the
default (and only) XML format in BIND 9.10+.
### JSON Statistics Channel
JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some distros still do
not enable support for JSON statistics in their BIND packages.
### Configuration:
- **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a
trailing slash in the URL. Default is "http://localhost:8053/xml/v3".
- **gather_memory_contexts** bool: Report per-context memory statistics.
- **gather_views** bool: Report per-view query statistics.
The following table summarizes the URL formats which should be used, depending on your BIND
version and configured statistics channel; a sample configuration block follows the table.
| BIND Version | Statistics Format | Example URL |
| ------------ | ----------------- | ----------------------------- |
| 9.6 - 9.8 | XML v2 | http://localhost:8053 |
| 9.9 | XML v2 | http://localhost:8053/xml/v2 |
| 9.9+ | XML v3 | http://localhost:8053/xml/v3 |
| 9.10+ | JSON v1 | http://localhost:8053/json/v1 |
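As a sketch, a minimal `[[inputs.bind]]` block combining the options above might look like the following; the URL assumes a local BIND 9.10+ instance with the JSON statistics channel enabled:
```toml
[[inputs.bind]]
  ## BIND statistics channel URLs to collect from (no trailing slash).
  urls = ["http://localhost:8053/json/v1"]
  ## Report per-context memory statistics.
  gather_memory_contexts = false
  ## Report per-view query statistics.
  gather_views = false
```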
#### Configuration of BIND Daemon
Add the following to your named.conf if running Telegraf on the same host as the BIND daemon:
```
statistics-channels {
inet 127.0.0.1 port 8053;
};
```
Alternatively, specify a wildcard address (e.g., 0.0.0.0) or specific IP address of an interface to
configure the BIND daemon to listen on that address. Note that you should secure the statistics
channel with an ACL if it is publicly reachable. Consult the BIND Administrator Reference Manual
for more information.
### Measurements & Fields:
- bind_counter
- name=value (multiple)
- bind_memory
- total_use
- in_use
- block_size
- context_size
- lost
- bind_memory_context
- total
- in_use
### Tags:
- All measurements
- url
- source
- port
- bind_counter
- type
- view (optional)
- bind_memory_context
- id
- name
### Sample Queries:
These are some useful queries (to generate dashboards or other) to run against data from this
plugin:
```sql
SELECT non_negative_derivative(mean(/^A$|^PTR$/), 5m) FROM bind_counter \
WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \
GROUP BY time(5m), "type"
```
```
name: bind_counter
tags: type=qtype
time non_negative_derivative_A non_negative_derivative_PTR
---- ------------------------- ---------------------------
1553862000000000000 254.99444444430992 1388.311111111194
1553862300000000000 354 2135.716666666791
1553862600000000000 316.8666666666977 2130.133333333768
1553862900000000000 309.05000000004657 2126.75
1553863200000000000 315.64999999990687 2128.483333332464
1553863500000000000 308.9166666667443 2132.350000000559
1553863800000000000 302.64999999990687 2131.1833333335817
1553864100000000000 310.85000000009313 2132.449999999255
1553864400000000000 314.3666666666977 2136.216666666791
1553864700000000000 303.2333333331626 2133.8166666673496
1553865000000000000 304.93333333334886 2127.333333333023
1553865300000000000 317.93333333334886 2130.3166666664183
1553865600000000000 280.6666666667443 1807.9071428570896
```
### Example Output
Here is example output of this plugin:
```
bind_memory,host=LAP,port=8053,source=localhost,url=localhost:8053 block_size=12058624i,context_size=4575056i,in_use=4113717i,lost=0i,total_use=16663252i 1554276619000000000
bind_counter,host=LAP,port=8053,source=localhost,type=opcode,url=localhost:8053 IQUERY=0i,NOTIFY=0i,QUERY=9i,STATUS=0i,UPDATE=0i 1554276619000000000
bind_counter,host=LAP,port=8053,source=localhost,type=rcode,url=localhost:8053 17=0i,18=0i,19=0i,20=0i,21=0i,22=0i,BADCOOKIE=0i,BADVERS=0i,FORMERR=0i,NOERROR=7i,NOTAUTH=0i,NOTIMP=0i,NOTZONE=0i,NXDOMAIN=0i,NXRRSET=0i,REFUSED=0i,RESERVED11=0i,RESERVED12=0i,RESERVED13=0i,RESERVED14=0i,RESERVED15=0i,SERVFAIL=2i,YXDOMAIN=0i,YXRRSET=0i 1554276619000000000
bind_counter,host=LAP,port=8053,source=localhost,type=qtype,url=localhost:8053 A=1i,ANY=1i,NS=1i,PTR=5i,SOA=1i 1554276619000000000
bind_counter,host=LAP,port=8053,source=localhost,type=nsstat,url=localhost:8053 AuthQryRej=0i,CookieBadSize=0i,CookieBadTime=0i,CookieIn=9i,CookieMatch=0i,CookieNew=9i,CookieNoMatch=0i,DNS64=0i,ECSOpt=0i,ExpireOpt=0i,KeyTagOpt=0i,NSIDOpt=0i,OtherOpt=0i,QryAuthAns=7i,QryBADCOOKIE=0i,QryDropped=0i,QryDuplicate=0i,QryFORMERR=0i,QryFailure=0i,QryNXDOMAIN=0i,QryNXRedir=0i,QryNXRedirRLookup=0i,QryNoauthAns=0i,QryNxrrset=1i,QryRecursion=2i,QryReferral=0i,QrySERVFAIL=2i,QrySuccess=6i,QryTCP=1i,QryUDP=8i,RPZRewrites=0i,RateDropped=0i,RateSlipped=0i,RecQryRej=0i,RecursClients=0i,ReqBadEDNSVer=0i,ReqBadSIG=0i,ReqEdns0=9i,ReqSIG0=0i,ReqTCP=1i,ReqTSIG=0i,Requestv4=9i,Requestv6=0i,RespEDNS0=9i,RespSIG0=0i,RespTSIG=0i,Response=9i,TruncatedResp=0i,UpdateBadPrereq=0i,UpdateDone=0i,UpdateFail=0i,UpdateFwdFail=0i,UpdateRej=0i,UpdateReqFwd=0i,UpdateRespFwd=0i,XfrRej=0i,XfrReqDone=0i 1554276619000000000
bind_counter,host=LAP,port=8053,source=localhost,type=zonestat,url=localhost:8053 AXFRReqv4=0i,AXFRReqv6=0i,IXFRReqv4=0i,IXFRReqv6=0i,NotifyInv4=0i,NotifyInv6=0i,NotifyOutv4=0i,NotifyOutv6=0i,NotifyRej=0i,SOAOutv4=0i,SOAOutv6=0i,XfrFail=0i,XfrSuccess=0i 1554276619000000000
bind_counter,host=LAP,port=8053,source=localhost,type=sockstat,url=localhost:8053 FDWatchClose=0i,FDwatchConn=0i,FDwatchConnFail=0i,FDwatchRecvErr=0i,FDwatchSendErr=0i,FdwatchBindFail=0i,RawActive=1i,RawClose=0i,RawOpen=1i,RawOpenFail=0i,RawRecvErr=0i,TCP4Accept=6i,TCP4AcceptFail=0i,TCP4Active=9i,TCP4BindFail=0i,TCP4Close=5i,TCP4Conn=0i,TCP4ConnFail=0i,TCP4Open=8i,TCP4OpenFail=0i,TCP4RecvErr=0i,TCP4SendErr=0i,TCP6Accept=0i,TCP6AcceptFail=0i,TCP6Active=2i,TCP6BindFail=0i,TCP6Close=0i,TCP6Conn=0i,TCP6ConnFail=0i,TCP6Open=2i,TCP6OpenFail=0i,TCP6RecvErr=0i,TCP6SendErr=0i,UDP4Active=18i,UDP4BindFail=14i,UDP4Close=14i,UDP4Conn=0i,UDP4ConnFail=0i,UDP4Open=32i,UDP4OpenFail=0i,UDP4RecvErr=0i,UDP4SendErr=0i,UDP6Active=3i,UDP6BindFail=0i,UDP6Close=6i,UDP6Conn=0i,UDP6ConnFail=6i,UDP6Open=9i,UDP6OpenFail=0i,UDP6RecvErr=0i,UDP6SendErr=0i,UnixAccept=0i,UnixAcceptFail=0i,UnixActive=0i,UnixBindFail=0i,UnixClose=0i,UnixConn=0i,UnixConnFail=0i,UnixOpen=0i,UnixOpenFail=0i,UnixRecvErr=0i,UnixSendErr=0i 1554276619000000000
```

View File

@ -1,85 +0,0 @@
# Bond Input Plugin
The Bond input plugin collects network bond interface status for both the
network bond interface as well as slave interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.
### Configuration:
```toml
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
```
### Measurements & Fields:
- bond
- active_slave (for active-backup mode)
- status
- bond_slave
- failures
- status
### Description:
```
active_slave
Currently active slave interface for active-backup mode.
status
Status of the bond interface or the bond's slave interface (down = 0, up = 1).
failures
Number of failures for the bond's slave interface.
```
### Tags:
- bond
- bond
- bond_slave
- bond
- interface
### Example output:
Configuration:
```
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
bond_interfaces = ["bond0", "bond1"]
```
Run:
```
telegraf --config telegraf.conf --input-filter bond --test
```
Output:
```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

View File

@ -1,102 +0,0 @@
# Burrow Kafka Consumer Lag Checking Input Plugin
Collect Kafka topic, consumer and partition status
via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).
Supported Burrow version: `1.x`
### Configuration
```toml
[[inputs.burrow]]
## Burrow API endpoints in format "schema://host:port".
## Default is "http://localhost:8000".
servers = ["http://localhost:8000"]
## Override Burrow API prefix.
## Useful when Burrow is behind reverse-proxy.
# api_prefix = "/v3/kafka"
## Maximum time to receive response.
# response_timeout = "5s"
## Limit per-server concurrent connections.
## Useful in case of large number of topics or consumer groups.
# concurrent_connections = 20
## Filter clusters, default is no filtering.
## Values can be specified as glob patterns.
# clusters_include = []
# clusters_exclude = []
## Filter consumer groups, default is no filtering.
## Values can be specified as glob patterns.
# groups_include = []
# groups_exclude = []
## Filter topics, default is no filtering.
## Values can be specified as glob patterns.
# topics_include = []
# topics_exclude = []
## Credentials for basic HTTP authentication.
# username = ""
# password = ""
## Optional SSL config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# insecure_skip_verify = false
```
### Group/Partition Status mappings
* `OK` = 1
* `NOT_FOUND` = 2
* `WARN` = 3
* `ERR` = 4
* `STOP` = 5
* `STALL` = 6
> Unknown values will be mapped to 0
### Fields
* `burrow_group` (one event per each consumer group)
- status (string, see Partition Status mappings)
- status_code (int, `1..6`, see Partition status mappings)
- partition_count (int, `number of partitions`)
- offset (int64, `total offset of all partitions`)
- total_lag (int64, `totallag`)
- lag (int64, `maxlag.current_lag || 0`)
- timestamp (int64, `end.timestamp`)
* `burrow_partition` (one event per each topic partition)
- status (string, see Partition Status mappings)
- status_code (int, `1..6`, see Partition status mappings)
- lag (int64, `current_lag || 0`)
- offset (int64, `end.timestamp`)
- timestamp (int64, `end.timestamp`)
* `burrow_topic` (one event per topic offset)
- offset (int64)
### Tags
* `burrow_group`
- cluster (string)
- group (string)
* `burrow_partition`
- cluster (string)
- group (string)
- topic (string)
- partition (int)
- owner (string)
* `burrow_topic`
- cluster (string)
- topic (string)
- partition (int)

View File

@ -1,127 +0,0 @@
# Cassandra Input Plugin
### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.
#### Plugin arguments:
- **context** string: Context root used for jolokia url
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
- **metrics** []string: List of Jmx paths that identify mbeans attributes
#### Description
The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean attributes through the Jolokia REST endpoint. All metrics are collected for each configured server.
See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
# Measurements:
The Cassandra plugin produces one or more measurements for each configured metric, adding the server's name as the `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name.
Given a configuration like:
```toml
[[inputs.cassandra]]
context = "/jolokia/read"
servers = [":8778"]
metrics = ["/java.lang:type=Memory/HeapMemoryUsage"]
```
The collected metrics will be:
```
javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084
```
# Useful Metrics:
Here is a list of metrics that might be useful for monitoring your Cassandra cluster, compiled from multiple sources on the web; a combined configuration example follows the list.
- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics)
- [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
#### measurement = javaGarbageCollector
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount
#### measurement = javaMemory
- /java.lang:type=Memory/HeapMemoryUsage
- /java.lang:type=Memory/NonHeapMemoryUsage
#### measurement = cassandraCache
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity
#### measurement = cassandraClient
- /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients
#### measurement = cassandraClientRequest
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures
#### measurement = cassandraCommitLog
- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize
#### measurement = cassandraCompaction
- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted
#### measurement = cassandraStorage
- /org.apache.cassandra.metrics:type=Storage,name=Load
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
#### measurement = cassandraTable
Using wildcards for "keyspace" and "scope" can create a lot of series, since metrics will be reported for every table and keyspace, including internal system tables. Specify a keyspace name and/or a table name to limit them.
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
#### measurement = cassandraThreadPools
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
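As a sketch of how a few of these paths can be combined into one configuration (the Jolokia port and the metric selection here are illustrative assumptions, not recommendations):
```toml
[[inputs.cassandra]]
  context = "/jolokia/read"
  servers = [":8778"]
  metrics = [
    "/java.lang:type=Memory/HeapMemoryUsage",
    "/java.lang:type=GarbageCollector,name=ParNew/CollectionTime",
    "/org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency",
    "/org.apache.cassandra.metrics:type=Storage,name=Load"
  ]
```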

View File

@ -1,386 +0,0 @@
# Ceph Storage Input Plugin
Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
Ceph introduced its own Telegraf and Influx plugin in the 13.x Mimic release. That Telegraf module sends metrics to a Telegraf instance configured with a socket_listener. [Learn more in their docs](http://docs.ceph.com/docs/mimic/mgr/telegraf/)
*Admin Socket Stats*
This gatherer works by scanning the configured socket_dir for OSD, MON, MDS, and RGW socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**; for OSDs it runs **ceph --admin-daemon $file perf dump**.
The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
used as collection tags, and all sub-keys are flattened. For example:
```json
{
"paxos": {
"refresh": 9363435,
"refresh_latency": {
"avgcount": 9363435,
"sum": 5378.794002000
}
}
}
```
This would be parsed into the following metrics, all of which would be tagged with collection=paxos:
- refresh = 9363435
- refresh_latency.avgcount = 9363435
- refresh_latency.sum = 5378.794002000
*Cluster Stats*
This gatherer works by invoking ceph commands against the cluster, so it only requires the ceph client, a valid
ceph configuration, and an access key to function (the ceph_config and ceph_user configuration variables work
together to specify these prerequisites). It may be run on any server that has access to
the cluster (a cluster-stats-only sketch follows the sample configuration below). The currently supported commands are:
* ceph status
* ceph df
* ceph osd pool stats
### Configuration:
```toml
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below
## location of ceph binary
ceph_binary = "/usr/bin/ceph"
## directory in which to look for socket files
socket_dir = "/var/run/ceph"
## prefix of MON and OSD socket files, used to determine socket type
mon_prefix = "ceph-mon"
osd_prefix = "ceph-osd"
mds_prefix = "ceph-mds"
rgw_prefix = "ceph-client"
## suffix used to identify socket files
socket_suffix = "asok"
## Ceph user to authenticate as, ceph will search for the corresponding keyring
## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
## client section of ceph.conf for example:
##
## [client.telegraf]
## keyring = /etc/ceph/client.telegraf.keyring
##
## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = false
```
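If Telegraf runs on a host that has cluster access but no local daemon sockets, only the *Cluster Stats* gatherer is useful. The following is a minimal sketch (a trimmed variant of the sample above, not an additional documented mode) that keeps just the relevant options; the `client.telegraf` user is a placeholder and must exist with a matching keyring as described above.
```toml
[[inputs.ceph]]
  interval = '1m'
  ## No MON/OSD admin sockets on this host, so skip the socket scan
  gather_admin_socket_stats = false
  ## Gather cluster-wide stats via the ceph CLI instead
  gather_cluster_stats = true
  ## Placeholder user; requires a matching keyring (see the note above)
  ceph_user = "client.telegraf"
  ceph_config = "/etc/ceph/ceph.conf"
```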
### Metrics:
*Admin Socket Stats*
All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.
All admin measurements will have the following tags:
- type: either 'osd', 'mon', 'mds' or 'rgw' to indicate which type of node was queried
- id: a unique string identifier, parsed from the socket file name for the node
- collection: the top-level key under which these fields were reported. Possible values are:
- for MON nodes:
- cluster
- leveldb
- mon
- paxos
- throttle-mon_client_bytes
- throttle-mon_daemon_bytes
- throttle-msgr_dispatch_throttler-mon
- for OSD nodes:
- WBThrottle
- filestore
- leveldb
- mutex-FileJournal::completions_lock
- mutex-FileJournal::finisher_lock
- mutex-FileJournal::write_lock
- mutex-FileJournal::writeq_lock
- mutex-JOS::ApplyManager::apply_lock
- mutex-JOS::ApplyManager::com_lock
- mutex-JOS::SubmitManager::lock
- mutex-WBThrottle::lock
- objecter
- osd
- recoverystate_perf
- throttle-filestore_bytes
- throttle-filestore_ops
- throttle-msgr_dispatch_throttler-client
- throttle-msgr_dispatch_throttler-cluster
- throttle-msgr_dispatch_throttler-hb_back_server
- throttle-msgr_dispatch_throttler-hb_front_server
- throttle-msgr_dispatch_throttler-hbclient
- throttle-msgr_dispatch_throttler-ms_objecter
- throttle-objecter_bytes
- throttle-objecter_ops
- throttle-osd_client_bytes
- throttle-osd_client_messages
- for MDS nodes:
- AsyncMessenger::Worker-0
- AsyncMessenger::Worker-1
- AsyncMessenger::Worker-2
- finisher-PurgeQueue
- mds
- mds_cache
- mds_log
- mds_mem
- mds_server
- mds_sessions
- objecter
- purge_queue
- throttle-msgr_dispatch_throttler-mds
- throttle-objecter_bytes
- throttle-objecter_ops
- throttle-write_buf_throttle
- for RGW nodes:
- AsyncMessenger::Worker-0
- AsyncMessenger::Worker-1
- AsyncMessenger::Worker-2
- cct
- finisher-radosclient
- mempool
- objecter
- rgw
- simple-throttler
- throttle-msgr_dispatch_throttler-radosclient
- throttle-objecter_bytes
- throttle-objecter_ops
- throttle-rgw_async_rados_ops
*Cluster Stats*
+ ceph_health
- fields:
- status
- overall_status
- ceph_osdmap
- fields:
- epoch (float)
- num_osds (float)
- num_up_osds (float)
- num_in_osds (float)
- full (bool)
- nearfull (bool)
- num_remapped_pgs (float)
+ ceph_pgmap
- fields:
- version (float)
- num_pgs (float)
- data_bytes (float)
- bytes_used (float)
- bytes_avail (float)
- bytes_total (float)
- read_bytes_sec (float)
- write_bytes_sec (float)
- op_per_sec (float, exists only in ceph <10)
- read_op_per_sec (float)
- write_op_per_sec (float)
- ceph_pgmap_state
- tags:
- state
- fields:
- count (float)
+ ceph_usage
- fields:
- total_bytes (float)
- total_used_bytes (float)
- total_avail_bytes (float)
- total_space (float, exists only in ceph <0.84)
- total_used (float, exists only in ceph <0.84)
- total_avail (float, exists only in ceph <0.84)
- ceph_pool_usage
- tags:
- name
- fields:
- kb_used (float)
- bytes_used (float)
- objects (float)
- percent_used (float)
- max_avail (float)
+ ceph_pool_stats
- tags:
- name
- fields:
- read_bytes_sec (float)
- write_bytes_sec (float)
- op_per_sec (float, exists only in ceph <10)
- read_op_per_sec (float)
- write_op_per_sec (float)
- recovering_objects_per_sec (float)
- recovering_bytes_per_sec (float)
- recovering_keys_per_sec (float)
### Example Output:
*Cluster Stats*
```
ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000
ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000
ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000
ceph_pgmap_state,host=stefanmon1,state=active+clean count=504 1587118504000000000
ceph_usage,host=stefanmon1 total_avail_bytes=849879302144,total_bytes=858959904768,total_used_bytes=196018176 1587118505000000000
ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avail=285804986368,objects=0,percent_used=0 1587118505000000000
ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000
```
*Admin Socket Stats*
```
> ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000
> ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000
> ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
> ceph,collection=throttle-msgr_dispatch_throttler-mon,host=stefanmon1,id=stefanmon1,type=monitor get=12695426,get_or_fail_fail=0,get_or_fail_success=12695426,get_started=0,get_sum=42542216884,max=104857600,put=12695426,put_sum=42542216884,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
> ceph,collection=finisher-mon_finisher,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117563000000000
> ceph,collection=finisher-monstore,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=1609831,complete_latency.avgtime=0.015857621,complete_latency.sum=25528.09131035,queue_len=0 1587117563000000000
> ceph,collection=mon,host=stefanmon1,id=stefanmon1,type=monitor election_call=25,election_lose=0,election_win=22,num_elections=94,num_sessions=3,session_add=174679,session_rm=439316,session_trim=137 1587117563000000000
> ceph,collection=throttle-mon_daemon_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=72697,get_or_fail_fail=0,get_or_fail_success=72697,get_started=0,get_sum=32261199,max=419430400,put=72697,put_sum=32261199,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
> ceph,collection=rocksdb,host=stefanmon1,id=stefanmon1,type=monitor compact=1,compact_queue_len=0,compact_queue_merge=1,compact_range=19126,get=62449211,get_latency.avgcount=62449211,get_latency.avgtime=0.000022216,get_latency.sum=1387.371811726,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,submit_latency.avgtime=0,submit_latency.sum=0,submit_sync_latency.avgcount=3219961,submit_sync_latency.avgtime=0.007532173,submit_sync_latency.sum=24253.303584224,submit_transaction=0,submit_transaction_sync=3219961 1587117563000000000
> ceph,collection=AsyncMessenger::Worker-0,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=148317,msgr_created_connections=162806,msgr_recv_bytes=11557888328,msgr_recv_messages=5113369,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=868.377161686,msgr_running_send_time=1626.525392721,msgr_running_total_time=4222.235694322,msgr_send_bytes=91516226816,msgr_send_messages=6973706 1587117563000000000
> ceph,collection=AsyncMessenger::Worker-2,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=146396,msgr_created_connections=159788,msgr_recv_bytes=2162802496,msgr_recv_messages=689168,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=164.148550562,msgr_running_send_time=153.462890368,msgr_running_total_time=644.188791379,msgr_send_bytes=7422484152,msgr_send_messages=749381 1587117563000000000
> ceph,collection=cluster,host=stefanmon1,id=stefanmon1,type=monitor num_bytes=5055,num_mon=3,num_mon_quorum=3,num_object=245,num_object_degraded=0,num_object_misplaced=0,num_object_unfound=0,num_osd=9,num_osd_in=8,num_osd_up=8,num_pg=504,num_pg_active=504,num_pg_active_clean=504,num_pg_peering=0,num_pool=17,osd_bytes=858959904768,osd_bytes_avail=849889787904,osd_bytes_used=9070116864,osd_epoch=203 1587117563000000000
> ceph,collection=paxos,host=stefanmon1,id=stefanmon1,type=monitor accept_timeout=1,begin=1609847,begin_bytes.avgcount=1609847,begin_bytes.sum=41408662074,begin_keys.avgcount=1609847,begin_keys.sum=4829541,begin_latency.avgcount=1609847,begin_latency.avgtime=0.007213392,begin_latency.sum=11612.457661116,collect=0,collect_bytes.avgcount=0,collect_bytes.sum=0,collect_keys.avgcount=0,collect_keys.sum=0,collect_latency.avgcount=0,collect_latency.avgtime=0,collect_latency.sum=0,collect_timeout=1,collect_uncommitted=17,commit=1609831,commit_bytes.avgcount=1609831,commit_bytes.sum=41087428442,commit_keys.avgcount=1609831,commit_keys.sum=11637931,commit_latency.avgcount=1609831,commit_latency.avgtime=0.006236333,commit_latency.sum=10039.442388355,lease_ack_timeout=0,lease_timeout=0,new_pn=33,new_pn_latency.avgcount=33,new_pn_latency.avgtime=3.844272773,new_pn_latency.sum=126.86100151,refresh=1609856,refresh_latency.avgcount=1609856,refresh_latency.avgtime=0.005900486,refresh_latency.sum=9498.932866761,restart=109,share_state=2,share_state_bytes.avgcount=2,share_state_bytes.sum=39612,share_state_keys.avgcount=2,share_state_keys.sum=2,start_leader=22,start_peon=0,store_state=14,store_state_bytes.avgcount=14,store_state_bytes.sum=51908281,store_state_keys.avgcount=14,store_state_keys.sum=7016,store_state_latency.avgcount=14,store_state_latency.avgtime=11.668377665,store_state_latency.sum=163.357287311 1587117563000000000
> ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,host=stefanmon1,id=stefanmon1,type=monitor get=13225,get_or_fail_fail=0,get_or_fail_success=13225,get_started=0,get_sum=158700,max=104857600,put=13225,put_sum=158700,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000
> ceph,collection=AsyncMessenger::Worker-1,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=147680,msgr_created_connections=162374,msgr_recv_bytes=29781706740,msgr_recv_messages=7170733,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=1728.559151358,msgr_running_send_time=2086.681244508,msgr_running_total_time=6084.532916585,msgr_send_bytes=94062125718,msgr_send_messages=9161564 1587117563000000000
> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=0,type=osd get=281745,get_or_fail_fail=0,get_or_fail_success=281745,get_started=0,get_sum=446024457,max=104857600,put=281745,put_sum=446024457,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=0,type=osd get=275707,get_or_fail_fail=0,get_or_fail_success=0,get_started=275707,get_sum=185073179842,max=67108864,put=268870,put_sum=185073179842,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=rocksdb,host=stefanosd1,id=0,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1570,get_latency.avgcount=1570,get_latency.avgtime=0.000051233,get_latency.sum=0.080436788,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=275707,submit_latency.avgtime=0.000174936,submit_latency.sum=48.231345334,submit_sync_latency.avgcount=268870,submit_sync_latency.avgtime=0.006097313,submit_sync_latency.sum=1639.384555624,submit_transaction=275707,submit_transaction_sync=268870 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=0,type=osd msgr_active_connections=2093,msgr_created_connections=29142,msgr_recv_bytes=7214238199,msgr_recv_messages=3928206,msgr_running_fast_dispatch_time=171.289615064,msgr_running_recv_time=278.531155966,msgr_running_send_time=489.482588813,msgr_running_total_time=1134.004853662,msgr_send_bytes=9814725232,msgr_send_messages=3814927 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=0,type=osd get=488206,get_or_fail_fail=0,get_or_fail_success=488206,get_started=0,get_sum=104085134,max=104857600,put=488206,put_sum=104085134,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
> ceph,collection=recoverystate_perf,host=stefanosd1,id=0,type=osd activating_latency.avgcount=87,activating_latency.avgtime=0.114348341,activating_latency.sum=9.948305683,active_latency.avgcount=25,active_latency.avgtime=1790.961574431,active_latency.sum=44774.039360795,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=25,clean_latency.avgtime=1790.830827794,clean_latency.sum=44770.770694867,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=141,getinfo_latency.avgtime=0.446233476,getinfo_latency.sum=62.918920183,getlog_latency.avgcount=87,getlog_latency.avgtime=0.007708069,getlog_latency.sum=0.670602073,getmissing_latency.avgcount=87,getmissing_latency.avgtime=0.000077594,getmissing_latency.sum=0.006750701,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=166,initial_latency.avgtime=0.001313715,initial_latency.sum=0.218076764,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=141,peering_latency.avgtime=0.948324273,peering_latency.sum=133.713722563,primary_latency.avgcount=79,primary_latency.avgtime=567.706192991,primary_latency.sum=44848.78924634,recovered_latency.avgcount=87,recovered_latency.avgtime=0.000378284,recovered_latency.sum=0.032910791,recovering_latency.avgcount=2,recovering_latency.avgtime=0.338242008,recovering_latency.sum=0.676484017,replicaactive_latency.avgcount=23,replicaactive_latency.avgtime=1790.893991295,replicaactive_latency.sum=41190.561799786,repnotrecovering_latency.avgcount=25,repnotrecovering_latency.avgtime=1647.627024984,repnotrecovering_latency.sum=41190.675624616,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.311884638,reprecovering_latency.sum=0.623769276,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000462873,repwaitrecoveryreserved_latency.sum=0.000925746,reset_latency.avgcount=372,reset_latency.avgtime=0.125056393,reset_latency.sum=46.520978537,start_latency.avgcount=372,start_latency.avgtime=0.000109397,start_latency.sum=0.040695881,started_latency.avgcount=206,started_latency.avgtime=418.299777245,started_latency.sum=86169.754112641,stray_latency.avgcount=231,stray_latency.avgtime=0.98203205,stray_latency.sum=226.849403565,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.002802377,waitlocalrecoveryreserved_latency.sum=0.005604755,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012855439,waitremoterecoveryreserved_latency.sum=0.025710878,waitupthru_latency.avgcount=87,waitupthru_latency.avgtime=0.805727895,waitupthru_latency.sum=70.09832695 1587117698000000000
> ceph,collection=cct,host=stefanosd1,id=0,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=bluefs,host=stefanosd1,id=0,type=osd bytes_written_slow=0,bytes_written_sst=9018781,bytes_written_wal=831081573,db_total_bytes=4294967296,db_used_bytes=434110464,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=134291456,log_compactions=1,logged_bytes=1101668352,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
> ceph,collection=mempool,host=stefanosd1,id=0,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=10600,bluefs_items=458,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=622592,bluestore_cache_data_items=43,bluestore_cache_onode_bytes=249280,bluestore_cache_onode_items=380,bluestore_cache_other_bytes=192678,bluestore_cache_other_items=20199,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2412465,buffer_anon_items=297,buffer_meta_bytes=5896,buffer_meta_items=67,mds_co_bytes=0,mds_co_items=0,osd_bytes=2124800,osd_items=166,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3214704,osd_pglog_items=6288,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
> ceph,collection=osd,host=stefanosd1,id=0,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=21,map_message_epochs=40,map_messages=31,messages_delayed_for_map=0,missed_crc=0,numpg=166,numpg_primary=62,numpg_removing=0,numpg_replica=104,numpg_stray=0,object_ctx_cache_hit=476529,object_ctx_cache_total=476536,op=476525,op_before_dequeue_op_lat.avgcount=755708,op_before_dequeue_op_lat.avgtime=0.000205759,op_before_dequeue_op_lat.sum=155.493843473,op_before_queue_op_lat.avgcount=755702,op_before_queue_op_lat.avgtime=0.000047877,op_before_queue_op_lat.sum=36.181069552,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=476525,op_latency.avgtime=0.000365956,op_latency.sum=174.387387878,op_out_bytes=10882,op_prepare_latency.avgcount=476527,op_prepare_latency.avgtime=0.000205307,op_prepare_latency.sum=97.834380034,op_process_latency.avgcount=476525,op_process_latency.avgtime=0.000139616,op_process_latency.sum=66.530847665,op_r=476521,op_r_latency.avgcount=476521,op_r_latency.avgtime=0.00036559,op_r_latency.sum=174.21148267,op_r_out_bytes=10882,op_r_prepare_latency.avgcount=476523,op_r_prepare_latency.avgtime=0.000205302,op_r_prepare_latency.sum=97.831473175,op_r_process_latency.avgcount=476521,op_r_process_latency.avgtime=0.000139396,op_r_process_latency.sum=66.425498624,op_rw=2,op_rw_in_bytes=0,op_rw_latency.avgcount=2,op_rw_latency.avgtime=0.048818975,op_rw_latency.sum=0.097637951,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=2,op_rw_prepare_latency.avgtime=0.000467887,op_rw_prepare_latency.sum=0.000935775,op_rw_process_latency.avgcount=2,op_rw_process_latency.avgtime=0.013741256,op_rw_process_latency.sum=0.027482512,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.039133628,op_w_latency.sum=0.078267257,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.000985542,op_w_prepare_latency.sum=0.001971084,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.038933264,op_w_process_latency.sum=0.077866529,op_wip=0,osd_map_bl_cache_hit=22,osd_map_bl_cache_miss=40,osd_map_cache_hit=4570,osd_map_cache_miss=15,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2050,osd_pg_fastinfo=265780,osd_pg_info=274542,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=2,push_out_bytes=10,recovery_bytes=10,recovery_ops=2,stat_bytes=107369988096,stat_bytes_avail=106271539200,stat_bytes_used=1098448896,subop=253554,subop_in_bytes=168644225,subop_latency.avgcount=253554,subop_latency.avgtime=0.0073036,subop_latency.sum=1851.857230388,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=253554,subop_w_in_bytes=168644225,subop_w_latency.avgcount=253554,subop_w_latency.avgtime=0.0073036,subop_w_latency.sum=1851.857230388,tier_clean=0,tier_delay=0,tier_dirty=0,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=0,type=osd msgr_active_connections=2055,msgr_created_connections=27411,msgr_recv_bytes=6431950009,msgr_recv_messages=3552443,msgr_running_fast_dispatch_time=162.271664213,msgr_running_recv_time=254.307853033,msgr_running_send_time=503.037285799,msgr_running_total_time=1130.21070681,msgr_send_bytes=10865436237,msgr_send_messages=3523374 1587117698000000000
> ceph,collection=bluestore,host=stefanosd1,id=0,type=osd bluestore_allocated=24641536,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=622592,bluestore_buffer_hit_bytes=160578,bluestore_buffer_miss_bytes=540236,bluestore_buffers=43,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=532102,bluestore_onode_misses=388,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=380,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1987856,bluestore_txc=275707,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=275707,commit_lat.avgtime=0.00699778,commit_lat.sum=1929.337103334,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=67,csum_lat.avgtime=0.000032601,csum_lat.sum=0.002184323,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=268870,kv_commit_lat.avgtime=0.006365428,kv_commit_lat.sum=1711.472749866,kv_final_lat.avgcount=268867,kv_final_lat.avgtime=0.000043227,kv_final_lat.sum=11.622427109,kv_flush_lat.avgcount=268870,kv_flush_lat.avgtime=0.000000223,kv_flush_lat.sum=0.060141588,kv_sync_lat.avgcount=268870,kv_sync_lat.avgtime=0.006365652,kv_sync_lat.sum=1711.532891454,omap_lower_bound_lat.avgcount=2,omap_lower_bound_lat.avgtime=0.000006524,omap_lower_bound_lat.sum=0.000013048,omap_next_lat.avgcount=6704,omap_next_lat.avgtime=0.000004721,omap_next_lat.sum=0.031654097,omap_seek_to_first_lat.avgcount=323,omap_seek_to_first_lat.avgtime=0.00000522,omap_seek_to_first_lat.sum=0.00168614,omap_upper_bound_lat.avgcount=4,omap_upper_bound_lat.avgtime=0.000013086,omap_upper_bound_lat.sum=0.000052344,read_lat.avgcount=227,read_lat.avgtime=0.000699457,read_lat.sum=0.158776879,read_onode_meta_lat.avgcount=311,read_onode_meta_lat.avgtime=0.000072207,read_onode_meta_lat.sum=0.022456667,read_wait_aio_lat.avgcount=84,read_wait_aio_lat.avgtime=0.001556141,read_wait_aio_lat.sum=0.130715885,state_aio_wait_lat.avgcount=275707,state_aio_wait_lat.avgtime=0.000000345,state_aio_wait_lat.sum=0.095246457,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=275696,state_done_lat.avgtime=0.00000286,state_done_lat.sum=0.788700007,state_finishing_lat.avgcount=275696,state_finishing_lat.avgtime=0.000000302,state_finishing_lat.sum=0.083437168,state_io_done_lat.avgcount=275707,state_io_done_lat.avgtime=0.000001041,state_io_done_lat.sum=0.287025147,state_kv_commiting_lat.avgcount=275707,state_kv_commiting_lat.avgtime=0.006424459,state_kv_commiting_lat.sum=1771.268407864,state_kv_done_lat.avgcount=275707,state_kv_done_lat.avgtime=0.000001627,state_kv_done_lat.sum=0.448805853,state_kv_queued_lat.avgcount=275707,state_kv_queued_lat.avgtime=0.000488565,state_kv_queued_lat.sum=134.7009424,state_prepare_lat.
avgcount=275707,state_prepare_lat.avgtime=0.000082464,state_prepare_lat.sum=22.736065534,submit_lat.avgcount=275707,submit_lat.avgtime=0.000120236,submit_lat.sum=33.149934412,throttle_lat.avgcount=275707,throttle_lat.avgtime=0.000001571,throttle_lat.sum=0.433185935,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000
> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
> ceph,collection=objecter,host=stefanosd1,id=0,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.003447516,complete_latency.sum=0.037922681,queue_len=0 1587117698000000000
> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=0,type=osd msgr_active_connections=2128,msgr_created_connections=33685,msgr_recv_bytes=8679123051,msgr_recv_messages=4200356,msgr_running_fast_dispatch_time=151.889337454,msgr_running_recv_time=297.632294886,msgr_running_send_time=599.20020523,msgr_running_total_time=1321.361931202,msgr_send_bytes=11716202897,msgr_send_messages=4347418 1587117698000000000
> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=0,type=osd get=476554,get_or_fail_fail=0,get_or_fail_success=476554,get_started=0,get_sum=103413728,max=524288000,put=476587,put_sum=103413728,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=0,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=1,type=osd get=860895,get_or_fail_fail=0,get_or_fail_success=860895,get_started=0,get_sum=596482256,max=104857600,put=860895,put_sum=596482256,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
> ceph,collection=osd,host=stefanosd1,id=1,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=29,map_message_epochs=50,map_messages=39,messages_delayed_for_map=0,missed_crc=0,numpg=188,numpg_primary=71,numpg_removing=0,numpg_replica=117,numpg_stray=0,object_ctx_cache_hit=1349777,object_ctx_cache_total=2934118,op=1319230,op_before_dequeue_op_lat.avgcount=3792053,op_before_dequeue_op_lat.avgtime=0.000405802,op_before_dequeue_op_lat.sum=1538.826381623,op_before_queue_op_lat.avgcount=3778690,op_before_queue_op_lat.avgtime=0.000033273,op_before_queue_op_lat.sum=125.731131596,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=1319230,op_latency.avgtime=0.002858138,op_latency.sum=3770.541581676,op_out_bytes=1789210,op_prepare_latency.avgcount=1336472,op_prepare_latency.avgtime=0.000279458,op_prepare_latency.sum=373.488913339,op_process_latency.avgcount=1319230,op_process_latency.avgtime=0.002666408,op_process_latency.sum=3517.606407526,op_r=1075394,op_r_latency.avgcount=1075394,op_r_latency.avgtime=0.000303779,op_r_latency.sum=326.682443032,op_r_out_bytes=1789210,op_r_prepare_latency.avgcount=1075394,op_r_prepare_latency.avgtime=0.000171228,op_r_prepare_latency.sum=184.138580631,op_r_process_latency.avgcount=1075394,op_r_process_latency.avgtime=0.00011609,op_r_process_latency.sum=124.842894319,op_rw=243832,op_rw_in_bytes=0,op_rw_latency.avgcount=243832,op_rw_latency.avgtime=0.014123636,op_rw_latency.sum=3443.79445124,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=261072,op_rw_prepare_latency.avgtime=0.000725265,op_rw_prepare_latency.sum=189.346543463,op_rw_process_latency.avgcount=243832,op_rw_process_latency.avgtime=0.013914089,op_rw_process_latency.sum=3392.700241086,op_w=4,op_w_in_bytes=0,op_w_latency.avgcount=4,op_w_latency.avgtime=0.016171851,op_w_latency.sum=0.064687404,op_w_prepare_latency.avgcount=6,op_w_prepare_latency.avgtime=0.00063154,op_w_prepare_latency.sum=0.003789245,op_w_process_latency.avgcount=4,op_w_process_latency.avgtime=0.01581803,op_w_process_latency.sum=0.063272121,op_wip=0,osd_map_bl_cache_hit=36,osd_map_bl_cache_miss=40,osd_map_cache_hit=5404,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2333,osd_pg_fastinfo=576157,osd_pg_info=591751,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=22,push_out_bytes=0,recovery_bytes=0,recovery_ops=21,stat_bytes=107369988096,stat_bytes_avail=106271997952,stat_bytes_used=1097990144,subop=306946,subop_in_bytes=204236742,subop_latency.avgcount=306946,subop_latency.avgtime=0.006744881,subop_latency.sum=2070.314452989,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=306946,subop_w_in_bytes=204236742,subop_w_latency.avgcount=306946,subop_w_latency.avgtime=0.006744881,subop_w_latency.sum=2070.314452989,tier_clean=0,tier_delay=0,tier_dirty=8,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
> ceph,collection=objecter,host=stefanosd1,id=1,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=1,type=osd msgr_active_connections=1356,msgr_created_connections=12290,msgr_recv_bytes=8577187219,msgr_recv_messages=6387040,msgr_running_fast_dispatch_time=475.903632306,msgr_running_recv_time=425.937196699,msgr_running_send_time=783.676217521,msgr_running_total_time=1989.242459076,msgr_send_bytes=12583034449,msgr_send_messages=6074344 1587117698000000000
> ceph,collection=bluestore,host=stefanosd1,id=1,type=osd bluestore_allocated=24182784,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=142047,bluestore_buffer_miss_bytes=541480,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=1403948,bluestore_onode_misses=1584732,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=459,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1985647,bluestore_txc=593150,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=58,bluestore_write_small_bytes=343091,bluestore_write_small_deferred=20,bluestore_write_small_new=38,bluestore_write_small_pre_read=20,bluestore_write_small_unused=0,commit_lat.avgcount=593150,commit_lat.avgtime=0.006514834,commit_lat.sum=3864.274280733,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=60,csum_lat.avgtime=0.000028258,csum_lat.sum=0.001695512,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=578129,kv_commit_lat.avgtime=0.00570707,kv_commit_lat.sum=3299.423186928,kv_final_lat.avgcount=578124,kv_final_lat.avgtime=0.000042752,kv_final_lat.sum=24.716171934,kv_flush_lat.avgcount=578129,kv_flush_lat.avgtime=0.000000209,kv_flush_lat.sum=0.121169044,kv_sync_lat.avgcount=578129,kv_sync_lat.avgtime=0.00570728,kv_sync_lat.sum=3299.544355972,omap_lower_bound_lat.avgcount=22,omap_lower_bound_lat.avgtime=0.000005979,omap_lower_bound_lat.sum=0.000131539,omap_next_lat.avgcount=13248,omap_next_lat.avgtime=0.000004836,omap_next_lat.sum=0.064077797,omap_seek_to_first_lat.avgcount=525,omap_seek_to_first_lat.avgtime=0.000004906,omap_seek_to_first_lat.sum=0.002575786,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=406,read_lat.avgtime=0.000383254,read_lat.sum=0.155601529,read_onode_meta_lat.avgcount=483,read_onode_meta_lat.avgtime=0.000008805,read_onode_meta_lat.sum=0.004252832,read_wait_aio_lat.avgcount=77,read_wait_aio_lat.avgtime=0.001907361,read_wait_aio_lat.sum=0.146866799,state_aio_wait_lat.avgcount=593150,state_aio_wait_lat.avgtime=0.000000388,state_aio_wait_lat.sum=0.230498048,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=593140,state_done_lat.avgtime=0.000003048,state_done_lat.sum=1.80789161,state_finishing_lat.avgcount=593140,state_finishing_lat.avgtime=0.000000325,state_finishing_lat.sum=0.192952339,state_io_done_lat.avgcount=593150,state_io_done_lat.avgtime=0.000001202,state_io_done_lat.sum=0.713333116,state_kv_commiting_lat.avgcount=593150,state_kv_commiting_lat.avgtime=0.005788541,state_kv_commiting_lat.sum=3433.473378536,state_kv_done_lat.avgcount=593150,state_kv_done_lat.avgtime=0.000001472,state_kv_done_lat.sum=0.873559611,state_kv_queued_lat.avgcount=593150,state_kv_queued_lat.avgtime=0.000634215,state_kv_queued_lat.sum=376.18491577,state_prepare_lat.avgcount=59
3150,state_prepare_lat.avgtime=0.000089694,state_prepare_lat.sum=53.202464675,submit_lat.avgcount=593150,submit_lat.avgtime=0.000127856,submit_lat.sum=75.83816759,throttle_lat.avgcount=593150,throttle_lat.avgtime=0.000001726,throttle_lat.sum=1.023832181,write_pad_bytes=144333,write_penalty_read_ops=0 1587117698000000000
> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=1,type=osd get=2920772,get_or_fail_fail=0,get_or_fail_success=2920772,get_started=0,get_sum=739935873,max=524288000,put=4888498,put_sum=739935873,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=1,type=osd msgr_active_connections=1375,msgr_created_connections=12689,msgr_recv_bytes=6393440855,msgr_recv_messages=3260458,msgr_running_fast_dispatch_time=120.622437418,msgr_running_recv_time=225.24709441,msgr_running_send_time=499.150587343,msgr_running_total_time=1043.340296846,msgr_send_bytes=11134862571,msgr_send_messages=3450760 1587117698000000000
> ceph,collection=bluefs,host=stefanosd1,id=1,type=osd bytes_written_slow=0,bytes_written_sst=19824993,bytes_written_wal=1788507023,db_total_bytes=4294967296,db_used_bytes=522190848,files_written_sst=4,files_written_wal=2,gift_bytes=0,log_bytes=1056768,log_compactions=2,logged_bytes=1933271040,max_bytes_db=1483735040,max_bytes_slow=0,max_bytes_wal=0,num_files=12,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=1,type=osd get=10,get_or_fail_fail=0,get_or_fail_success=10,get_started=0,get_sum=7052009,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7052009,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=rocksdb,host=stefanosd1,id=1,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1586061,get_latency.avgcount=1586061,get_latency.avgtime=0.000083009,get_latency.sum=131.658296684,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=593150,submit_latency.avgtime=0.000172072,submit_latency.sum=102.064900673,submit_sync_latency.avgcount=578129,submit_sync_latency.avgtime=0.005447017,submit_sync_latency.sum=3149.078822012,submit_transaction=593150,submit_transaction_sync=578129 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=recoverystate_perf,host=stefanosd1,id=1,type=osd activating_latency.avgcount=104,activating_latency.avgtime=0.071646485,activating_latency.sum=7.451234493,active_latency.avgcount=33,active_latency.avgtime=1734.369034268,active_latency.sum=57234.178130859,backfilling_latency.avgcount=1,backfilling_latency.avgtime=2.598401698,backfilling_latency.sum=2.598401698,clean_latency.avgcount=33,clean_latency.avgtime=1734.213467342,clean_latency.sum=57229.044422292,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=167,getinfo_latency.avgtime=0.373444627,getinfo_latency.sum=62.365252849,getlog_latency.avgcount=105,getlog_latency.avgtime=0.003575062,getlog_latency.sum=0.375381569,getmissing_latency.avgcount=104,getmissing_latency.avgtime=0.000157091,getmissing_latency.sum=0.016337565,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=188,initial_latency.avgtime=0.001833512,initial_latency.sum=0.344700343,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=167,peering_latency.avgtime=1.501818082,peering_latency.sum=250.803619796,primary_latency.avgcount=97,primary_latency.avgtime=591.344286378,primary_latency.sum=57360.395778762,recovered_latency.avgcount=104,recovered_latency.avgtime=0.000291138,recovered_latency.sum=0.030278433,recovering_latency.avgcount=2,recovering_latency.avgtime=0.142378096,recovering_latency.sum=0.284756192,replicaactive_latency.avgcount=32,replicaactive_latency.avgtime=1788.474901442,replicaactive_latency.sum=57231.196846165,repnotrecovering_latency.avgcount=34,repnotrecovering_latency.avgtime=1683.273587087,repnotrecovering_latency.sum=57231.301960987,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.418094818,reprecovering_latency.sum=0.836189637,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000588413,repwaitrecoveryreserved_latency.sum=0.001176827,reset_latency.avgcount=433,reset_latency.avgtime=0.15669689,reset_latency.sum=67.849753631,start_latency.avgcount=433,start_latency.avgtime=0.000412707,start_latency.sum=0.178702508,started_latency.avgcount=245,started_latency.avgtime=468.419544137,started_latency.sum=114762.788313581,stray_latency.avgcount=266,stray_latency.avgtime=1.489291271,stray_latency.sum=396.151478238,waitactingchange_latency.avgcount=1,waitactingchange_latency.avgtime=0.982689906,waitactingchange_latency.sum=0.982689906,waitlocalbackfillreserved_latency.avgcount=1,waitlocalbackfillreserved_latency.avgtime=0.000542092,waitlocalbackfillreserved_latency.sum=0.000542092,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.00391669,waitlocalrecoveryreserved_latency.sum=0.007833381,waitremotebackfillreserved_latency.avgcount=1,waitremotebackfillreserved_latency.avgtime=0.003110409,waitremotebackfillreserved_latency.sum=0.003110409,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012229338,waitremoterecoveryreserved_latency.sum=0.024458677,waitupthru_latency.avgcount=104,waitupthru_latency.avgtime=1.807608905,waitupthru_latency.sum=187.991326197 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=1,type=osd msgr_active_connections=1289,msgr_created_connections=9469,msgr_recv_bytes=8348149800,msgr_recv_messages=5048791,msgr_running_fast_dispatch_time=313.754567889,msgr_running_recv_time=372.054833029,msgr_running_send_time=694.900405016,msgr_running_total_time=1656.294769387,msgr_send_bytes=11550148208,msgr_send_messages=5175962 1587117698000000000
> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=1,type=osd get=593150,get_or_fail_fail=0,get_or_fail_success=0,get_started=593150,get_sum=398147414260,max=67108864,put=578129,put_sum=398147414260,take=0,take_sum=0,val=0,wait.avgcount=29,wait.avgtime=0.000972655,wait.sum=0.028207005 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=cct,host=stefanosd1,id=1,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
> ceph,collection=mempool,host=stefanosd1,id=1,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=13064,bluefs_items=593,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=301104,bluestore_cache_onode_items=459,bluestore_cache_other_bytes=230945,bluestore_cache_other_items=26119,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=7520,bluestore_txc_items=10,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=657768,bluestore_writing_deferred_items=172,bluestore_writing_items=0,buffer_anon_bytes=2328515,buffer_anon_items=271,buffer_meta_bytes=5808,buffer_meta_items=66,mds_co_bytes=0,mds_co_items=0,osd_bytes=2406400,osd_items=188,osd_mapbl_bytes=139623,osd_mapbl_items=9,osd_pglog_bytes=6768784,osd_pglog_items=18179,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=1,type=osd get=2932513,get_or_fail_fail=0,get_or_fail_success=2932513,get_started=0,get_sum=740620215,max=104857600,put=2932513,put_sum=740620215,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=10,complete_latency.avgtime=0.002884646,complete_latency.sum=0.028846469,queue_len=0 1587117698000000000
> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.002714416,complete_latency.sum=0.029858583,queue_len=0 1587117698000000000
> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
> ceph,collection=objecter,host=stefanosd1,id=2,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=mempool,host=stefanosd1,id=2,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=11624,bluefs_items=522,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=228288,bluestore_cache_onode_items=348,bluestore_cache_other_bytes=174158,bluestore_cache_other_items=18527,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2311664,buffer_anon_items=244,buffer_meta_bytes=5456,buffer_meta_items=62,mds_co_bytes=0,mds_co_items=0,osd_bytes=1920000,osd_items=150,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3393520,osd_pglog_items=9128,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000
> ceph,collection=osd,host=stefanosd1,id=2,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=37,map_message_epochs=56,map_messages=37,messages_delayed_for_map=0,missed_crc=0,numpg=150,numpg_primary=59,numpg_removing=0,numpg_replica=91,numpg_stray=0,object_ctx_cache_hit=705923,object_ctx_cache_total=705951,op=690584,op_before_dequeue_op_lat.avgcount=1155697,op_before_dequeue_op_lat.avgtime=0.000217926,op_before_dequeue_op_lat.sum=251.856487141,op_before_queue_op_lat.avgcount=1148445,op_before_queue_op_lat.avgtime=0.000039696,op_before_queue_op_lat.sum=45.589516462,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=690584,op_latency.avgtime=0.002488685,op_latency.sum=1718.646504654,op_out_bytes=1026000,op_prepare_latency.avgcount=698700,op_prepare_latency.avgtime=0.000300375,op_prepare_latency.sum=209.872029659,op_process_latency.avgcount=690584,op_process_latency.avgtime=0.00230742,op_process_latency.sum=1593.46739165,op_r=548020,op_r_latency.avgcount=548020,op_r_latency.avgtime=0.000298287,op_r_latency.sum=163.467760649,op_r_out_bytes=1026000,op_r_prepare_latency.avgcount=548020,op_r_prepare_latency.avgtime=0.000186359,op_r_prepare_latency.sum=102.128629183,op_r_process_latency.avgcount=548020,op_r_process_latency.avgtime=0.00012716,op_r_process_latency.sum=69.686468884,op_rw=142562,op_rw_in_bytes=0,op_rw_latency.avgcount=142562,op_rw_latency.avgtime=0.010908597,op_rw_latency.sum=1555.151525732,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=150678,op_rw_prepare_latency.avgtime=0.000715043,op_rw_prepare_latency.sum=107.741399304,op_rw_process_latency.avgcount=142562,op_rw_process_latency.avgtime=0.01068836,op_rw_process_latency.sum=1523.754107887,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.013609136,op_w_latency.sum=0.027218273,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.001000586,op_w_prepare_latency.sum=0.002001172,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.013407439,op_w_process_latency.sum=0.026814879,op_wip=0,osd_map_bl_cache_hit=15,osd_map_bl_cache_miss=41,osd_map_cache_hit=4241,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=1824,osd_pg_fastinfo=285998,osd_pg_info=294869,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=1,push_out_bytes=0,recovery_bytes=0,recovery_ops=0,stat_bytes=107369988096,stat_bytes_avail=106271932416,stat_bytes_used=1098055680,subop=134165,subop_in_bytes=89501237,subop_latency.avgcount=134165,subop_latency.avgtime=0.007313523,subop_latency.sum=981.218888627,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=134165,subop_w_in_bytes=89501237,subop_w_latency.avgcount=134165,subop_w_latency.avgtime=0.007313523,subop_w_latency.sum=981.218888627,tier_clean=0,tier_delay=0,tier_dirty=4,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=2,type=osd msgr_active_connections=746,msgr_created_connections=15212,msgr_recv_bytes=8633229006,msgr_recv_messages=4284202,msgr_running_fast_dispatch_time=153.820479102,msgr_running_recv_time=282.031655658,msgr_running_send_time=585.444749736,msgr_running_total_time=1231.431789242,msgr_send_bytes=11962769351,msgr_send_messages=4440622 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=bluefs,host=stefanosd1,id=2,type=osd bytes_written_slow=0,bytes_written_sst=9065815,bytes_written_wal=901884611,db_total_bytes=4294967296,db_used_bytes=546308096,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=225726464,log_compactions=1,logged_bytes=1195945984,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000
> ceph,collection=recoverystate_perf,host=stefanosd1,id=2,type=osd activating_latency.avgcount=88,activating_latency.avgtime=0.086149065,activating_latency.sum=7.581117751,active_latency.avgcount=29,active_latency.avgtime=1790.849396082,active_latency.sum=51934.632486379,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=29,clean_latency.avgtime=1790.754765195,clean_latency.sum=51931.888190683,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=134,getinfo_latency.avgtime=0.427567953,getinfo_latency.sum=57.294105786,getlog_latency.avgcount=88,getlog_latency.avgtime=0.011810192,getlog_latency.sum=1.03929697,getmissing_latency.avgcount=88,getmissing_latency.avgtime=0.000104598,getmissing_latency.sum=0.009204673,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=150,initial_latency.avgtime=0.001251361,initial_latency.sum=0.187704197,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=134,peering_latency.avgtime=0.998405763,peering_latency.sum=133.786372331,primary_latency.avgcount=75,primary_latency.avgtime=693.473306562,primary_latency.sum=52010.497992212,recovered_latency.avgcount=88,recovered_latency.avgtime=0.000609715,recovered_latency.sum=0.053654964,recovering_latency.avgcount=1,recovering_latency.avgtime=0.100713031,recovering_latency.sum=0.100713031,replicaactive_latency.avgcount=21,replicaactive_latency.avgtime=1790.852354921,replicaactive_latency.sum=37607.89945336,repnotrecovering_latency.avgcount=21,repnotrecovering_latency.avgtime=1790.852315529,repnotrecovering_latency.sum=37607.898626121,reprecovering_latency.avgcount=0,reprecovering_latency.avgtime=0,reprecovering_latency.sum=0,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=0,repwaitrecoveryreserved_latency.avgtime=0,repwaitrecoveryreserved_latency.sum=0,reset_latency.avgcount=346,reset_latency.avgtime=0.126826803,reset_latency.sum=43.882073917,start_latency.avgcount=346,start_latency.avgtime=0.000233277,start_latency.sum=0.080713962,started_latency.avgcount=196,started_latency.avgtime=457.885378797,started_latency.sum=89745.534244237,stray_latency.avgcount=212,stray_latency.avgtime=1.013774396,stray_latency.sum=214.920172121,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=1,waitlocalrecoveryreserved_latency.avgtime=0.001572379,waitlocalrecoveryreserved_latency.sum=0.001572379,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=1,waitremoterecoveryreserved_latency.avgtime=0.012729633,waitremoterecoveryreserved_latency.sum=0.012729633,waitupthru_latency.avgcount=88,waitupthru_latency.avgtime=0.857137729,waitupthru_latency.sum=75.428120205 1587117698000000000
> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=bluestore,host=stefanosd1,id=2,type=osd bluestore_allocated=24248320,bluestore_blob_split=0,bluestore_blobs=83,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=161362,bluestore_buffer_miss_bytes=534799,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=83,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=723852,bluestore_onode_misses=364,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=348,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1984402,bluestore_txc=295997,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=295997,commit_lat.avgtime=0.006994931,commit_lat.sum=2070.478673619,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=47,csum_lat.avgtime=0.000034434,csum_lat.sum=0.001618423,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=291889,kv_commit_lat.avgtime=0.006347015,kv_commit_lat.sum=1852.624108527,kv_final_lat.avgcount=291885,kv_final_lat.avgtime=0.00004358,kv_final_lat.sum=12.720529751,kv_flush_lat.avgcount=291889,kv_flush_lat.avgtime=0.000000211,kv_flush_lat.sum=0.061636079,kv_sync_lat.avgcount=291889,kv_sync_lat.avgtime=0.006347227,kv_sync_lat.sum=1852.685744606,omap_lower_bound_lat.avgcount=1,omap_lower_bound_lat.avgtime=0.000004482,omap_lower_bound_lat.sum=0.000004482,omap_next_lat.avgcount=6933,omap_next_lat.avgtime=0.000003956,omap_next_lat.sum=0.027427456,omap_seek_to_first_lat.avgcount=309,omap_seek_to_first_lat.avgtime=0.000005879,omap_seek_to_first_lat.sum=0.001816658,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=229,read_lat.avgtime=0.000394981,read_lat.sum=0.090450704,read_onode_meta_lat.avgcount=295,read_onode_meta_lat.avgtime=0.000016832,read_onode_meta_lat.sum=0.004965516,read_wait_aio_lat.avgcount=66,read_wait_aio_lat.avgtime=0.001237841,read_wait_aio_lat.sum=0.081697561,state_aio_wait_lat.avgcount=295997,state_aio_wait_lat.avgtime=0.000000357,state_aio_wait_lat.sum=0.105827433,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=295986,state_done_lat.avgtime=0.000003017,state_done_lat.sum=0.893199127,state_finishing_lat.avgcount=295986,state_finishing_lat.avgtime=0.000000306,state_finishing_lat.sum=0.090792683,state_io_done_lat.avgcount=295997,state_io_done_lat.avgtime=0.000001066,state_io_done_lat.sum=0.315577655,state_kv_commiting_lat.avgcount=295997,state_kv_commiting_lat.avgtime=0.006423586,state_kv_commiting_lat.sum=1901.362268572,state_kv_done_lat.avgcount=295997,state_kv_done_lat.avgtime=0.00000155,state_kv_done_lat.sum=0.458963064,state_kv_queued_lat.avgcount=295997,state_kv_queued_lat.avgtime=0.000477234,state_kv_queued_lat.sum=141.260101773,state_prepare_lat.avgcount=295997,
state_prepare_lat.avgtime=0.000091806,state_prepare_lat.sum=27.174436583,submit_lat.avgcount=295997,submit_lat.avgtime=0.000135729,submit_lat.sum=40.17557682,throttle_lat.avgcount=295997,throttle_lat.avgtime=0.000002734,throttle_lat.sum=0.809479837,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000
> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=2,type=osd get=295997,get_or_fail_fail=0,get_or_fail_success=0,get_started=295997,get_sum=198686579299,max=67108864,put=291889,put_sum=198686579299,take=0,take_sum=0,val=0,wait.avgcount=83,wait.avgtime=0.003670612,wait.sum=0.304660858 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=2,type=osd get=452060,get_or_fail_fail=0,get_or_fail_success=452060,get_started=0,get_sum=269934345,max=104857600,put=452060,put_sum=269934345,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=2,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000
> ceph,collection=cct,host=stefanosd1,id=2,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=2,type=osd msgr_active_connections=670,msgr_created_connections=13455,msgr_recv_bytes=6334605563,msgr_recv_messages=3287843,msgr_running_fast_dispatch_time=137.016615819,msgr_running_recv_time=240.687997039,msgr_running_send_time=471.710658466,msgr_running_total_time=1034.029109337,msgr_send_bytes=9753423475,msgr_send_messages=3439611 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=2,type=osd get=710355,get_or_fail_fail=0,get_or_fail_success=710355,get_started=0,get_sum=166306283,max=104857600,put=710355,put_sum=166306283,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=2,type=osd msgr_active_connections=705,msgr_created_connections=17953,msgr_recv_bytes=7261438733,msgr_recv_messages=4496034,msgr_running_fast_dispatch_time=254.716476808,msgr_running_recv_time=272.196741555,msgr_running_send_time=571.102924903,msgr_running_total_time=1338.461077493,msgr_send_bytes=10772250508,msgr_send_messages=4192781 1587117698000000000
> ceph,collection=rocksdb,host=stefanosd1,id=2,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1424,get_latency.avgcount=1424,get_latency.avgtime=0.000030752,get_latency.sum=0.043792142,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=295997,submit_latency.avgtime=0.000173137,submit_latency.sum=51.248072285,submit_sync_latency.avgcount=291889,submit_sync_latency.avgtime=0.006094397,submit_sync_latency.sum=1778.887521449,submit_transaction=295997,submit_transaction_sync=291889 1587117698000000000
> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=2,type=osd get=698701,get_or_fail_fail=0,get_or_fail_success=698701,get_started=0,get_sum=165630172,max=524288000,put=920880,put_sum=165630172,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000
> ceph,collection=mds_sessions,host=stefanmds1,id=stefanmds1,type=mds average_load=0,avg_session_uptime=0,session_add=0,session_count=0,session_remove=0,sessions_open=0,sessions_stale=0,total_load=0 1587117476000000000
> ceph,collection=mempool,host=stefanmds1,id=stefanmds1,type=mds bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=132069,buffer_anon_items=82,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=44208,mds_co_items=154,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=16952,osdmap_items=139,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117476000000000
> ceph,collection=objecter,host=stefanmds1,id=stefanmds1,type=mds command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=1,omap_del=0,omap_rd=28,omap_wr=1,op=33,op_active=0,op_laggy=0,op_pg=0,op_r=26,op_reply=33,op_resend=2,op_rmw=0,op_send=35,op_send_bytes=364,op_w=7,osd_laggy=0,osd_session_close=91462,osd_session_open=91468,osd_sessions=6,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=5,osdop_getxattr=14,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=8,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=2,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=1,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117476000000000
> ceph,collection=cct,host=stefanmds1,id=stefanmds1,type=mds total_workers=1,unhealthy_workers=0 1587117476000000000
> ceph,collection=mds_server,host=stefanmds1,id=stefanmds1,type=mds cap_revoke_eviction=0,dispatch_client_request=0,dispatch_server_request=0,handle_client_request=0,handle_client_session=0,handle_slave_request=0,req_create_latency.avgcount=0,req_create_latency.avgtime=0,req_create_latency.sum=0,req_getattr_latency.avgcount=0,req_getattr_latency.avgtime=0,req_getattr_latency.sum=0,req_getfilelock_latency.avgcount=0,req_getfilelock_latency.avgtime=0,req_getfilelock_latency.sum=0,req_link_latency.avgcount=0,req_link_latency.avgtime=0,req_link_latency.sum=0,req_lookup_latency.avgcount=0,req_lookup_latency.avgtime=0,req_lookup_latency.sum=0,req_lookuphash_latency.avgcount=0,req_lookuphash_latency.avgtime=0,req_lookuphash_latency.sum=0,req_lookupino_latency.avgcount=0,req_lookupino_latency.avgtime=0,req_lookupino_latency.sum=0,req_lookupname_latency.avgcount=0,req_lookupname_latency.avgtime=0,req_lookupname_latency.sum=0,req_lookupparent_latency.avgcount=0,req_lookupparent_latency.avgtime=0,req_lookupparent_latency.sum=0,req_lookupsnap_latency.avgcount=0,req_lookupsnap_latency.avgtime=0,req_lookupsnap_latency.sum=0,req_lssnap_latency.avgcount=0,req_lssnap_latency.avgtime=0,req_lssnap_latency.sum=0,req_mkdir_latency.avgcount=0,req_mkdir_latency.avgtime=0,req_mkdir_latency.sum=0,req_mknod_latency.avgcount=0,req_mknod_latency.avgtime=0,req_mknod_latency.sum=0,req_mksnap_latency.avgcount=0,req_mksnap_latency.avgtime=0,req_mksnap_latency.sum=0,req_open_latency.avgcount=0,req_open_latency.avgtime=0,req_open_latency.sum=0,req_readdir_latency.avgcount=0,req_readdir_latency.avgtime=0,req_readdir_latency.sum=0,req_rename_latency.avgcount=0,req_rename_latency.avgtime=0,req_rename_latency.sum=0,req_renamesnap_latency.avgcount=0,req_renamesnap_latency.avgtime=0,req_renamesnap_latency.sum=0,req_rmdir_latency.avgcount=0,req_rmdir_latency.avgtime=0,req_rmdir_latency.sum=0,req_rmsnap_latency.avgcount=0,req_rmsnap_latency.avgtime=0,req_rmsnap_latency.sum=0,req_rmxattr_latency.avgcount=0,req_rmxattr_latency.avgtime=0,req_rmxattr_latency.sum=0,req_setattr_latency.avgcount=0,req_setattr_latency.avgtime=0,req_setattr_latency.sum=0,req_setdirlayout_latency.avgcount=0,req_setdirlayout_latency.avgtime=0,req_setdirlayout_latency.sum=0,req_setfilelock_latency.avgcount=0,req_setfilelock_latency.avgtime=0,req_setfilelock_latency.sum=0,req_setlayout_latency.avgcount=0,req_setlayout_latency.avgtime=0,req_setlayout_latency.sum=0,req_setxattr_latency.avgcount=0,req_setxattr_latency.avgtime=0,req_setxattr_latency.sum=0,req_symlink_latency.avgcount=0,req_symlink_latency.avgtime=0,req_symlink_latency.sum=0,req_unlink_latency.avgcount=0,req_unlink_latency.avgtime=0,req_unlink_latency.sum=0 1587117476000000000
> ceph,collection=AsyncMessenger::Worker-2,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=84,msgr_created_connections=68511,msgr_recv_bytes=238078,msgr_recv_messages=2655,msgr_running_fast_dispatch_time=0.004247777,msgr_running_recv_time=25.369012545,msgr_running_send_time=3.743427461,msgr_running_total_time=130.277111559,msgr_send_bytes=172767043,msgr_send_messages=18172 1587117476000000000
> ceph,collection=mds_log,host=stefanmds1,id=stefanmds1,type=mds ev=0,evadd=0,evex=0,evexd=0,evexg=0,evtrm=0,expos=4194304,jlat.avgcount=0,jlat.avgtime=0,jlat.sum=0,rdpos=4194304,replayed=1,seg=1,segadd=0,segex=0,segexd=0,segexg=0,segtrm=0,wrpos=0 1587117476000000000
> ceph,collection=AsyncMessenger::Worker-0,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=595,msgr_created_connections=943825,msgr_recv_bytes=78618003,msgr_recv_messages=914080,msgr_running_fast_dispatch_time=0.001544386,msgr_running_recv_time=459.627068807,msgr_running_send_time=469.337032316,msgr_running_total_time=2744.084305898,msgr_send_bytes=61684163658,msgr_send_messages=1858008 1587117476000000000
> ceph,collection=throttle-msgr_dispatch_throttler-mds,host=stefanmds1,id=stefanmds1,type=mds get=1216458,get_or_fail_fail=0,get_or_fail_success=1216458,get_started=0,get_sum=51976882,max=104857600,put=1216458,put_sum=51976882,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
> ceph,collection=AsyncMessenger::Worker-1,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=226,msgr_created_connections=42679,msgr_recv_bytes=63140151,msgr_recv_messages=299727,msgr_running_fast_dispatch_time=26.316138629,msgr_running_recv_time=36.969916165,msgr_running_send_time=70.457421128,msgr_running_total_time=226.230019936,msgr_send_bytes=193154464,msgr_send_messages=310481 1587117476000000000
> ceph,collection=mds,host=stefanmds1,id=stefanmds1,type=mds caps=0,dir_commit=0,dir_fetch=12,dir_merge=0,dir_split=0,exported=0,exported_inodes=0,forward=0,imported=0,imported_inodes=0,inode_max=2147483647,inodes=10,inodes_bottom=3,inodes_expired=0,inodes_pin_tail=0,inodes_pinned=10,inodes_top=7,inodes_with_caps=0,load_cent=0,openino_backtrace_fetch=0,openino_dir_fetch=0,openino_peer_discover=0,q=0,reply=0,reply_latency.avgcount=0,reply_latency.avgtime=0,reply_latency.sum=0,request=0,subtrees=2,traverse=0,traverse_dir_fetch=0,traverse_discover=0,traverse_forward=0,traverse_hit=0,traverse_lock=0,traverse_remote_ino=0 1587117476000000000
> ceph,collection=purge_queue,host=stefanmds1,id=stefanmds1,type=mds pq_executed=0,pq_executing=0,pq_executing_ops=0 1587117476000000000
> ceph,collection=throttle-write_buf_throttle,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
> ceph,collection=throttle-write_buf_throttle-0x5624e9377f40,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
> ceph,collection=mds_cache,host=stefanmds1,id=stefanmds1,type=mds ireq_enqueue_scrub=0,ireq_exportdir=0,ireq_flush=0,ireq_fragmentdir=0,ireq_fragstats=0,ireq_inodestats=0,num_recovering_enqueued=0,num_recovering_prioritized=0,num_recovering_processing=0,num_strays=0,num_strays_delayed=0,num_strays_enqueuing=0,recovery_completed=0,recovery_started=0,strays_created=0,strays_enqueued=0,strays_migrated=0,strays_reintegrated=0 1587117476000000000
> ceph,collection=throttle-objecter_bytes,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=16,put_sum=1016,take=33,take_sum=1016,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
> ceph,collection=throttle-objecter_ops,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=33,put_sum=33,take=33,take_sum=33,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000
> ceph,collection=mds_mem,host=stefanmds1,id=stefanmds1,type=mds cap=0,cap+=0,cap-=0,dir=12,dir+=12,dir-=0,dn=10,dn+=10,dn-=0,heap=322284,ino=13,ino+=13,ino-=0,rss=76032 1587117476000000000
> ceph,collection=finisher-PurgeQueue,host=stefanmds1,id=stefanmds1,type=mds complete_latency.avgcount=4,complete_latency.avgtime=0.000176985,complete_latency.sum=0.000707941,queue_len=0 1587117476000000000
> ceph,collection=cct,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw total_workers=0,unhealthy_workers=0 1587117156000000000
> ceph,collection=throttle-objecter_bytes,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=rgw,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw cache_hit=0,cache_miss=791706,failed_req=0,get=0,get_b=0,get_initial_lat.avgcount=0,get_initial_lat.avgtime=0,get_initial_lat.sum=0,keystone_token_cache_hit=0,keystone_token_cache_miss=0,pubsub_event_lost=0,pubsub_event_triggered=0,pubsub_events=0,pubsub_push_failed=0,pubsub_push_ok=0,pubsub_push_pending=0,pubsub_store_fail=0,pubsub_store_ok=0,put=0,put_b=0,put_initial_lat.avgcount=0,put_initial_lat.avgtime=0,put_initial_lat.sum=0,qactive=0,qlen=0,req=791705 1587117156000000000
> ceph,collection=throttle-msgr_dispatch_throttler-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=2697988,get_or_fail_fail=0,get_or_fail_success=2697988,get_started=0,get_sum=444563051,max=104857600,put=2697988,put_sum=444563051,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=finisher-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=2,complete_latency.avgtime=0.003530161,complete_latency.sum=0.007060323,queue_len=0 1587117156000000000
> ceph,collection=throttle-rgw_async_rados_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=64,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=throttle-objecter_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=791732,max=24576,put=791732,put_sum=791732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=throttle-objecter_bytes-0x5598969981c0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=objecter,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw command_active=0,command_resend=0,command_send=0,linger_active=8,linger_ping=1905736,linger_resend=4,linger_send=13,map_epoch=203,map_full=0,map_inc=17,omap_del=0,omap_rd=0,omap_wr=0,op=2697488,op_active=0,op_laggy=0,op_pg=0,op_r=791730,op_reply=2697476,op_resend=1,op_rmw=0,op_send=2697490,op_send_bytes=362,op_w=1905758,osd_laggy=5,osd_session_close=59558,osd_session_open=59566,osd_sessions=8,osdop_append=0,osdop_call=1,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=8,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=791714,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=16,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=791706,osdop_truncate=0,osdop_watch=1905750,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000
> ceph,collection=AsyncMessenger::Worker-2,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=11,msgr_created_connections=59839,msgr_recv_bytes=342697143,msgr_recv_messages=1441603,msgr_running_fast_dispatch_time=161.807937536,msgr_running_recv_time=118.174064257,msgr_running_send_time=207.679154333,msgr_running_total_time=698.527662129,msgr_send_bytes=530785909,msgr_send_messages=1679950 1587117156000000000
> ceph,collection=mempool,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=225471,buffer_anon_items=163,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=33904,osdmap_items=278,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117156000000000
> ceph,collection=throttle-msgr_dispatch_throttler-radosclient-0x559896998120,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1652935,get_or_fail_fail=0,get_or_fail_success=1652935,get_started=0,get_sum=276333029,max=104857600,put=1652935,put_sum=276333029,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=AsyncMessenger::Worker-1,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=17,msgr_created_connections=84859,msgr_recv_bytes=211170759,msgr_recv_messages=922646,msgr_running_fast_dispatch_time=31.487443762,msgr_running_recv_time=83.190789333,msgr_running_send_time=174.670510496,msgr_running_total_time=484.22086275,msgr_send_bytes=1322113179,msgr_send_messages=1636839 1587117156000000000
> ceph,collection=finisher-radosclient-0x559896998080,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117156000000000
> ceph,collection=throttle-objecter_ops-0x559896997b80,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=1637900,max=24576,put=1637900,put_sum=1637900,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000
> ceph,collection=AsyncMessenger::Worker-0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=18,msgr_created_connections=74757,msgr_recv_bytes=489001094,msgr_recv_messages=1986686,msgr_running_fast_dispatch_time=168.60950961,msgr_running_recv_time=142.903031533,msgr_running_send_time=267.911165712,msgr_running_total_time=824.885614951,msgr_send_bytes=707973504,msgr_send_messages=2463727 1587117156000000000
> ceph,collection=objecter-0x559896997720,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=8,omap_del=0,omap_rd=0,omap_wr=0,op=1637998,op_active=0,op_laggy=0,op_pg=0,op_r=1062803,op_reply=1637998,op_resend=15,op_rmw=0,op_send=1638013,op_send_bytes=63321099,op_w=575195,osd_laggy=0,osd_session_close=125555,osd_session_open=125563,osd_sessions=8,osdop_append=0,osdop_call=1637886,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=112,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000
```

View File

@@ -1,72 +0,0 @@
# CGroup Input Plugin
This input plugin will capture specific statistics per cgroup.
Consider restricting paths to the set of cgroups you really
want to monitor if you have a large number of cgroups, to avoid
any cardinality issues.
The following file formats are supported (a small parsing sketch follows the list):
* Single value
```
VAL\n
```
* New line separated values
```
VAL0\n
VAL1\n
```
* Space separated values
```
VAL0 VAL1 ...\n
```
* New line separated key-space-value pairs
```
KEY0 VAL0\n
KEY1 VAL1\n
```
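For illustration, a minimal Go sketch (not the plugin's actual parser) of reading a key-space-value file such as `memory.stat` into numeric fields could look like this; the path used in `main` is only an example.
```go
// Hypothetical sketch: parse a "KEY VAL" formatted cgroup file into numeric
// fields. Not the plugin's actual code.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func readKeyValueFile(path string) (map[string]int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	fields := map[string]int64{}
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) != 2 {
			continue // skip lines that are not "KEY VAL"
		}
		v, err := strconv.ParseInt(parts[1], 10, 64)
		if err != nil {
			continue // ignore non-numeric values in this sketch
		}
		fields[parts[0]] = v
	}
	return fields, scanner.Err()
}

func main() {
	fields, err := readKeyValueFile("/sys/fs/cgroup/memory/memory.stat") // example path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(fields)
}
```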
### Tags:
All measurements have the following tags:
- path
### Configuration:
```toml
# [[inputs.cgroup]]
# paths = [
# "/sys/fs/cgroup/memory", # root cgroup
# "/sys/fs/cgroup/memory/child1", # container cgroup
# "/sys/fs/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
# ]
# files = ["memory.*usage*", "memory.limit_in_bytes"]
```
### usage examples:
```toml
# [[inputs.cgroup]]
# paths = [
# "/sys/fs/cgroup/cpu", # root cgroup
# "/sys/fs/cgroup/cpu/*", # all container cgroups
# "/sys/fs/cgroup/cpu/*/*", # all children cgroups under each container cgroup
# ]
# files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
# [[inputs.cgroup]]
# paths = [
# "/sys/fs/cgroup/unified/*", # root cgroup
# ]
# files = ["*"]
```

View File

@@ -1,93 +0,0 @@
# chrony Input Plugin
Get standard chrony metrics, requires chronyc executable.
Below is the documentation of the various headers returned by `chronyc tracking`.
- Reference ID - This is the refid and name (or IP address) if available, of the
server to which the computer is currently synchronised. If this is 127.127.1.1
it means the computer is not synchronised to any external source and that you
have the local mode operating (via the local command in chronyc (see section local),
or the local directive in the /etc/chrony.conf file (see section local)).
- Stratum - The stratum indicates how many hops away from a computer with an attached
reference clock we are. Such a computer is a stratum-1 computer, so the computer in the
example is two hops away (i.e. a.b.c is a stratum-2 and is synchronised from a stratum-1).
- Ref time - This is the time (UTC) at which the last measurement from the reference
source was processed.
- System time - In normal operation, chronyd never steps the system clock, because any
jump in the timescale can have adverse consequences for certain application programs.
Instead, any error in the system clock is corrected by slightly speeding up or slowing
down the system clock until the error has been removed, and then returning to the system
clock's normal speed. A consequence of this is that there will be a period when the
system clock (as read by other programs using the gettimeofday() system call, or by the
date command in the shell) will be different from chronyd's estimate of the current true
time (which it reports to NTP clients when it is operating in server mode). The value
reported on this line is the difference due to this effect.
- Last offset - This is the estimated local offset on the last clock update.
- RMS offset - This is a long-term average of the offset value.
- Frequency - The frequency is the rate by which the system's clock would be
wrong if chronyd was not correcting it. It is expressed in ppm (parts per million).
For example, a value of 1ppm would mean that when the system's clock thinks it has
advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.
- Residual freq - This shows the residual frequency for the currently selected
reference source. This reflects any difference between what the measurements from the
reference source indicate the frequency should be and the frequency currently being used.
The reason this is not always zero is that a smoothing procedure is applied to the
frequency. Each time a measurement from the reference source is obtained and a new
residual frequency computed, the estimated accuracy of this residual is compared with the
estimated accuracy (see skew next) of the existing frequency value. A weighted average
is computed for the new frequency, with weights depending on these accuracies. If the
measurements from the reference source follow a consistent trend, the residual will be
driven to zero over time.
- Skew - This is the estimated error bound on the frequency.
- Root delay - This is the total of the network path delays to the stratum-1 computer
from which the computer is ultimately synchronised. In certain extreme situations, this
value can be negative. (This can arise in a symmetric peer arrangement where the computers'
frequencies are not tracking each other and the network delay is very short relative to the
turn-around time at each computer.)
- Root dispersion - This is the total dispersion accumulated through all the computers
back to the stratum-1 computer from which the computer is ultimately synchronised.
Dispersion is due to system clock resolution, statistical measurement variations etc.
- Leap status - This is the leap status, which can be Normal, Insert second,
Delete second or Not synchronised.
### Configuration:
```toml
# Get standard chrony metrics, requires chronyc executable.
[[inputs.chrony]]
## If true, chronyc tries to perform a DNS lookup for the time server.
# dns_lookup = false
```
### Measurements & Fields:
- chrony
- system_time (float, seconds)
- last_offset (float, seconds)
- rms_offset (float, seconds)
- frequency (float, ppm)
- residual_freq (float, ppm)
- skew (float, ppm)
- root_delay (float, seconds)
- root_dispersion (float, seconds)
- update_interval (float, seconds)
### Tags:
- All measurements have the following tags:
- reference_id
- stratum
- leap_status
### Example Output:
```
$ telegraf --config telegraf.conf --input-filter chrony --test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```
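As a rough illustration of how the `chronyc tracking` headers documented above become the float fields listed under Measurements & Fields, here is a minimal Go sketch. It is not the plugin's implementation: it assumes `chronyc` is on the PATH, ignores the sign conveyed by the "slow"/"fast" wording, and skips the tag values (reference_id, stratum, leap_status).
```go
// Hypothetical sketch: run `chronyc tracking` and turn "Header : value ..."
// lines into float fields keyed by a normalized name.
package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

func trackingFields() (map[string]float64, error) {
	out, err := exec.Command("chronyc", "tracking").Output()
	if err != nil {
		return nil, err
	}
	fields := map[string]float64{}
	for _, line := range strings.Split(string(out), "\n") {
		parts := strings.SplitN(line, ":", 2)
		if len(parts) != 2 {
			continue
		}
		// "System time" -> "system_time", "Last offset" -> "last_offset", ...
		name := strings.ToLower(strings.ReplaceAll(strings.TrimSpace(parts[0]), " ", "_"))
		// take the first token of the value, e.g. "0.000027073 seconds slow of NTP time"
		tokens := strings.Fields(parts[1])
		if len(tokens) == 0 {
			continue
		}
		if v, err := strconv.ParseFloat(tokens[0], 64); err == nil {
			fields[name] = v
		}
	}
	return fields, nil
}

func main() {
	f, err := trackingFields()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f)
}
```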

View File

@@ -1,44 +0,0 @@
# Cisco Model-Driven Telemetry (MDT) Input Plugin
Cisco model-driven telemetry (MDT) is an input plugin that consumes
telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports.
GRPC-based transport can utilize TLS for authentication and encryption.
Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded.
The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms.
The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later.
### Configuration:
```toml
[[inputs.cisco_telemetry_mdt]]
## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
## using the grpc transport.
transport = "grpc"
## Address and port to host telemetry listener
service_address = ":57000"
## Enable TLS; grpc transport only.
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Enable TLS client authentication and define allowed CA certificates; grpc
## transport only.
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
# embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
## Define aliases to map telemetry encoding paths to simple measurement names
[inputs.cisco_telemetry_mdt.aliases]
ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
```
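To make the `aliases` table above concrete, here is a small hedged Go sketch of resolving a measurement name from a telemetry encoding path, falling back to the full path when no alias is configured. It is an illustration of the mapping idea only, not the plugin's actual logic.
```go
// Hypothetical sketch: resolve a measurement name from configured aliases;
// fall back to the full encoding path when no alias matches.
package main

import "fmt"

func measurementName(aliases map[string]string, encodingPath string) string {
	for alias, path := range aliases {
		if path == encodingPath {
			return alias
		}
	}
	return encodingPath
}

func main() {
	aliases := map[string]string{
		"ifstats": "ietf-interfaces:interfaces-state/interface/statistics",
	}
	fmt.Println(measurementName(aliases, "ietf-interfaces:interfaces-state/interface/statistics")) // "ifstats"
	fmt.Println(measurementName(aliases, "Cisco-IOS-XR-infra-statsd-oper:infra-statistics"))       // full path
}
```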
### Example Output:
```
ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000
ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000
```

View File

@@ -1,205 +0,0 @@
# ClickHouse Input Plugin
This plugin gathers statistics from a [ClickHouse](https://github.com/ClickHouse/ClickHouse) server.
### Configuration
```toml
# Read metrics from one or many ClickHouse servers
[[inputs.clickhouse]]
## Username for authorization on ClickHouse server
## example: username = "default"
username = "default"
## Password for authorization on ClickHouse server
## example: password = "super_secret"
## HTTP(s) timeout while getting metrics values
## The timeout includes connection time, any redirects, and reading the response body.
## example: timeout = 1s
# timeout = 5s
## List of servers for metrics scraping
## metrics scrape via HTTP(s) clickhouse interface
## https://clickhouse.tech/docs/en/interfaces/http/
## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
servers = ["http://127.0.0.1:8123"]
## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
## with using same "user:password" described in "user" and "password" parameters
## and get this server hostname list from "system.clusters" table
## see
## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
## example: auto_discovery = false
# auto_discovery = true
## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
## when this filter is present, a "WHERE cluster IN (...)" filter will be applied
## please use only full cluster names here; regexp and glob filters are not allowed
## For example, for "/etc/clickhouse-server/config.d/remote.xml":
## <yandex>
## <remote_servers>
## <my-own-cluster>
## <shard>
## <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
## </shard>
## <shard>
## <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
## <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
## </shard>
##     </my-own-cluster>
## </remote_servers>
##
## </yandex>
##
## example: cluster_include = ["my-own-cluster"]
# cluster_include = []
## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
## when this filter is present, a "WHERE cluster NOT IN (...)" filter will be applied
## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
# cluster_exclude = []
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
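To make the auto-discovery behaviour described in the configuration comments concrete, the following hedged Go sketch builds a hostname-discovery query against `system.clusters` and applies the `cluster_include`/`cluster_exclude` filters. It is an illustration of the filter logic only, not the plugin's actual code, and the selected column is an assumption.
```go
// Hypothetical sketch: build the hostname-discovery query against
// system.clusters, applying cluster_include / cluster_exclude filters.
package main

import (
	"fmt"
	"strings"
)

func quoteList(names []string) string {
	quoted := make([]string, 0, len(names))
	for _, n := range names {
		quoted = append(quoted, "'"+strings.ReplaceAll(n, "'", "\\'")+"'")
	}
	return strings.Join(quoted, ", ")
}

func discoveryQuery(include, exclude []string) string {
	q := "SELECT DISTINCT host_name FROM system.clusters"
	switch {
	case len(include) > 0:
		q += " WHERE cluster IN (" + quoteList(include) + ")"
	case len(exclude) > 0:
		q += " WHERE cluster NOT IN (" + quoteList(exclude) + ")"
	}
	return q
}

func main() {
	fmt.Println(discoveryQuery([]string{"my-own-cluster"}, nil))
	fmt.Println(discoveryQuery(nil, []string{"my-internal-not-discovered-cluster"}))
}
```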
### Metrics
- clickhouse_events
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- all rows from [system.events][]
+ clickhouse_metrics
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- all rows from [system.metrics][]
- clickhouse_asynchronous_metrics
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- all rows from [system.asynchronous_metrics][]
+ clickhouse_tables
- tags:
- source (ClickHouse server hostname)
- table
- database
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- bytes
- parts
- rows
- clickhouse_zookeeper
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
    - root_nodes (count of nodes from [system.zookeeper][] where path=/)
+ clickhouse_replication_queue
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- too_many_tries_replicas (count of replicas which have num_tries > 1 in `system.replication_queue`)
- clickhouse_detached_parts
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
- detached_parts (total detached parts for all tables and databases from [system.detached_parts][])
+ clickhouse_dictionaries
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- dict_origin (xml Filename when dictionary created from *_dictionary.xml, database.table when dictionary created from DDL)
- fields:
    - is_loaded (1 when the dictionary data loaded successfully, 0 when it did not; see [system.dictionaries][] for details)
    - bytes_allocated (how many bytes are allocated in RAM after the dictionary is loaded)
- clickhouse_mutations
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
    - running - gauge showing how many mutations are currently incomplete, see [system.mutations][] for details
    - failed - counter showing the total number of failed mutations since the clickhouse-server was first started
    - completed - counter showing the total number of successfully finished mutations since the clickhouse-server was first started
+ clickhouse_disks
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- name (disk name in storage configuration)
- path (path to disk)
- fields:
    - free_space_percent - 0-100, gauge showing the current percentage of free disk space bytes relative to total disk space bytes
    - keep_free_space_percent - 0-100, gauge showing the current percentage of disk bytes required to be kept free relative to total disk space bytes
- clickhouse_processes
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
- fields:
    - percentile_50 - float gauge showing the 50th percentile (quantile 0.5) of the `elapsed` field of running processes, see [system.processes][] for details
    - percentile_90 - float gauge showing the 90th percentile (quantile 0.9) of the `elapsed` field of running processes, see [system.processes][] for details
    - longest_running - float gauge showing the maximum value of the `elapsed` field of running processes, see [system.processes][] for details
- clickhouse_text_log
- tags:
- source (ClickHouse server hostname)
- cluster (Name of the cluster [optional])
- shard_num (Shard number in the cluster [optional])
    - level (message level; only messages with a level less than or equal to Notice are collected), see [system.text_log][] for details
- fields:
    - messages_last_10_min - gauge showing how many messages were collected in the last 10 minutes
### Example Output
```
clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000
clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000
clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000
clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,source=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000
clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,source=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000
```
[system.events]: https://clickhouse.tech/docs/en/operations/system-tables/events/
[system.metrics]: https://clickhouse.tech/docs/en/operations/system-tables/metrics/
[system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics/
[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/
[system.detached_parts]: https://clickhouse.tech/docs/en/operations/system-tables/detached_parts/
[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/
[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/
[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/
[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/
[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/

View File

@@ -1,98 +0,0 @@
# Google Cloud PubSub Input Plugin
The GCP PubSub plugin ingests metrics from [Google Cloud PubSub][pubsub]
and creates metrics using one of the supported [input data formats][].
### Configuration
```toml
[[inputs.cloud_pubsub]]
## Required. Name of Google Cloud Platform (GCP) Project that owns
## the given PubSub subscription.
project = "my-project"
## Required. Name of PubSub subscription to ingest metrics from.
subscription = "my-subscription"
## Required. Data format to consume.
## Each data format has its own unique set of configuration options.
## Read more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
## Optional. Filepath for GCP credentials JSON file to authorize calls to
## PubSub APIs. If not set explicitly, Telegraf will attempt to use
## Application Default Credentials, which is preferred.
# credentials_file = "path/to/my/creds.json"
## Optional. Number of seconds to wait before attempting to restart the
## PubSub subscription receiver after an unexpected error.
## If the streaming pull for a PubSub Subscription fails (receiver),
## the agent attempts to restart receiving messages after this many seconds.
# retry_delay_seconds = 5
## Optional. Maximum byte length of a message to consume.
## Larger messages are dropped with an error. If less than 0 or unspecified,
## treated as no limit.
# max_message_len = 1000000
## Optional. Maximum messages to read from PubSub that have not been written
  ## to an output. Defaults to 1000.
## For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message contains 10 metrics and the output
## metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## The following are optional Subscription ReceiveSettings in PubSub.
## Read more about these values:
## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
## Optional. Maximum number of seconds for which a PubSub subscription
## should auto-extend the PubSub ACK deadline for each message. If less than
## 0, auto-extension is disabled.
# max_extension = 0
## Optional. Maximum number of unprocessed messages in PubSub
## (unacknowledged but not yet expired in PubSub).
## A value of 0 is treated as the default PubSub value.
## Negative values will be treated as unlimited.
# max_outstanding_messages = 0
## Optional. Maximum size in bytes of unprocessed messages in PubSub
## (unacknowledged but not yet expired in PubSub).
## A value of 0 is treated as the default PubSub value.
## Negative values will be treated as unlimited.
# max_outstanding_bytes = 0
## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
## to pull messages from PubSub concurrently. This limit applies to each
## subscription separately and is treated as the PubSub default if less than
## 1. Note this setting does not limit the number of messages that can be
## processed concurrently (use "max_outstanding_messages" instead).
# max_receiver_go_routines = 0
## Optional. If true, Telegraf will attempt to base64 decode the
## PubSub message data before parsing. Many GCP services that
## output JSON to Google PubSub base64-encode the JSON payload.
# base64_data = false
```
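For orientation, the subscription options above map roughly onto fields of the Go client's `ReceiveSettings`. Below is a minimal, hedged sketch of a streaming pull using `cloud.google.com/go/pubsub`; it is not the plugin's receiver, and the project, subscription, and limit values are placeholders.
```go
// Hypothetical sketch: a minimal streaming pull whose ReceiveSettings mirror
// max_outstanding_messages / max_outstanding_bytes / max_extension above.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()

	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-subscription")      // placeholder subscription
	sub.ReceiveSettings.MaxOutstandingMessages = 1000  // ~ max_outstanding_messages
	sub.ReceiveSettings.MaxOutstandingBytes = 100 << 20 // ~ max_outstanding_bytes
	sub.ReceiveSettings.MaxExtension = 10 * time.Minute // ~ max_extension
	sub.ReceiveSettings.NumGoroutines = 1              // ~ max_receiver_go_routines

	err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		fmt.Printf("received %d bytes of message data\n", len(m.Data))
		m.Ack() // in the real plugin, messages are only acked after delivery to an output
	})
	if err != nil {
		log.Fatal(err)
	}
}
```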
### Multiple Subscriptions and Topics
This plugin assumes you have already created a PULL subscription for a given
PubSub topic. To learn how to do so, see [how to create a subscription][pubsub create sub].
Each plugin agent can listen to one subscription at a time, so you will
need to run multiple instances of the plugin to pull messages from multiple
subscriptions/topics.
[pubsub]: https://cloud.google.com/pubsub
[pubsub create sub]: https://cloud.google.com/pubsub/docs/admin#create_a_pull_subscription
[input data formats]: /docs/DATA_FORMATS_INPUT.md

View File

@@ -1,72 +0,0 @@
# Google Cloud PubSub Push Input Plugin
The Google Cloud PubSub Push listener is a service input plugin that listens for messages sent via an HTTP POST from [Google Cloud PubSub][pubsub].
The plugin expects messages in Google's Pub/Sub JSON Format ONLY.
The intent of the plugin is to allow Telegraf to serve as an endpoint of the Google Pub/Sub 'Push' service.
Google's PubSub service will **only** send over HTTPS/TLS so this plugin must be behind a valid proxy or must be configured to use TLS.
Enable TLS by specifying the file names of a service TLS certificate and key.
Enable mutually authenticated TLS and authorize client connections by their signing certificate authority by listing the allowed CA certificate file names in `tls_allowed_cacerts`.
### Configuration:
This is a sample configuration for the plugin.
```toml
[[inputs.cloud_pubsub_push]]
## Address and port to host HTTP listener on
service_address = ":8080"
## Application secret to verify messages originate from Cloud Pub/Sub
# token = ""
## Path to listen to.
# path = "/"
## Maximum duration before timing out read of the request
# read_timeout = "10s"
## Maximum duration before timing out write of the response. This should be set to a value
## large enough that you can send at least 'metric_batch_size' number of messages within the
## duration.
# write_timeout = "10s"
## Maximum allowed http request body size in bytes.
  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# max_body_size = "500MB"
## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
# add_meta = false
## Optional. Maximum messages to read from PubSub that have not been written
## to an output. Defaults to 1000.
## For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
##
## For example, if each message contains 10 metrics and the output
## metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
# max_undelivered_messages = 1000
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
This plugin assumes you have already created a PUSH subscription for a given
PubSub topic.
[pubsub]: https://cloud.google.com/pubsub
[input data formats]: /docs/DATA_FORMATS_INPUT.md

@ -1,213 +0,0 @@
# Amazon CloudWatch Statistics Input Plugin
This plugin will pull Metric Statistics from Amazon CloudWatch.
### Amazon Authentication
This plugin uses a credential chain for authentication with the CloudWatch
API endpoint. The plugin will attempt to authenticate in the following order:
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
3. Shared profile from `profile` attribute
4. [Environment Variables](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#environment-variables)
5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file)
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
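For example, a sketch of rule 1, where credentials from a shared profile (rule 3) are used as the source for an STS-assumed role (the role ARN and profile name are placeholders):
```toml
[[inputs.cloudwatch]]
  region    = "us-east-1"
  namespace = "AWS/ELB"
  period    = "5m"
  delay     = "5m"
  ## Rule 1: assume this role via STS ...
  role_arn = "arn:aws:iam::123456789012:role/telegraf-cloudwatch"  # placeholder ARN
  ## ... using this shared profile as the source credentials
  profile  = "monitoring"                                          # placeholder profile name
```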
### Configuration:
```toml
[[inputs.cloudwatch]]
## Amazon Region
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Assumed credentials via STS if role_arn is specified
## 2) explicit credentials from 'access_key' and 'secret_key'
## 3) shared profile from 'profile'
## 4) environment variables
## 5) shared credentials file
## 6) EC2 Instance Profile
# access_key = ""
# secret_key = ""
# token = ""
# role_arn = ""
# profile = ""
# shared_credential_file = ""
## Endpoint to make request against, the correct endpoint is automatically
## determined and this option should only be set if you wish to override the
## default.
## ex: endpoint_url = "http://localhost:8000"
# endpoint_url = ""
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
## Configure the TTL for the internal cache of metrics.
# cache_ttl = "1h"
## Metric Statistic Namespace (required)
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 50.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
# ratelimit = 25
## Timeout for http requests made by the cloudwatch client.
# timeout = "5s"
## Namespace-wide statistic filters. These allow fewer queries to be made to
## cloudwatch.
  # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
# statistic_exclude = []
## Metrics to Pull
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
# names = ["Latency", "RequestCount"]
#
# ## Statistic filters for Metric. These allow for retrieving specific
# ## statistics for an individual metric.
  # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
# # statistic_exclude = []
#
# ## Dimension filters for Metric. All dimensions defined for the metric names
# ## must be specified in order to retrieve the metric statistics.
# [[inputs.cloudwatch.metrics.dimensions]]
# name = "LoadBalancerName"
# value = "p-example"
```
#### Requirements and Terminology
Plugin configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access patterns to allow monitoring of any CloudWatch metric.
- `region` must be a valid AWS [Region](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchRegions) value
- `period` must be a valid CloudWatch [Period](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchPeriods) value
- `namespace` must be a valid CloudWatch [Namespace](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Namespace) value
- `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
- `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs
Omitting a dimension value, or specifying a value of `'*'`, configures all available metrics that contain a dimension with the specified name
to be retrieved. If more than one dimension is specified, the metric must contain *all* of the configured dimensions; the value of the
wildcard dimension is ignored.
Example:
```
[[inputs.cloudwatch]]
period = "1m"
interval = "5m"
[[inputs.cloudwatch.metrics]]
names = ["Latency"]
## Dimension filters for Metric (optional)
[[inputs.cloudwatch.metrics.dimensions]]
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = "AvailabilityZone"
value = "*"
```
If the following ELBs are available:
- name: `p-example`, availabilityZone: `us-east-1a`
- name: `p-example`, availabilityZone: `us-east-1b`
- name: `q-example`, availabilityZone: `us-east-1a`
- name: `q-example`, availabilityZone: `us-east-1b`
Then 2 metrics will be output:
- name: `p-example`, availabilityZone: `us-east-1a`
- name: `p-example`, availabilityZone: `us-east-1b`
If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`)
would be exported containing the aggregate values of the ELB across availability zones.
To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart.
#### Restrictions and Limitations
- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
- CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/)
### Measurements & Fields:
Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic
Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case)
- cloudwatch_{namespace}
- {metric}_sum (metric Sum value)
- {metric}_average (metric Average value)
- {metric}_minimum (metric Minimum value)
- {metric}_maximum (metric Maximum value)
- {metric}_sample_count (metric SampleCount value)
### Tags:
Each measurement is tagged with the following identifiers to uniquely identify the associated metric
Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case)
- All measurements have the following tags:
- region (CloudWatch Region)
- {dimension-name} (Cloudwatch Dimension value - one for each metric dimension)
### Troubleshooting:
You can use the aws cli to get a list of available metrics and dimensions:
```
aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1
aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name CPUCreditBalance
```
If the expected metrics are not returned, you can try getting them manually
for a short period of time:
```
aws cloudwatch get-metric-data \
--start-time 2018-07-01T00:00:00Z \
--end-time 2018-07-01T00:15:00Z \
--metric-data-queries '[
{
"Id": "avgCPUCreditBalance",
"MetricStat": {
"Metric": {
"Namespace": "AWS/EC2",
"MetricName": "CPUCreditBalance",
"Dimensions": [
{
"Name": "InstanceId",
"Value": "i-deadbeef"
}
]
},
"Period": 300,
"Stat": "Average"
},
"Label": "avgCPUCreditBalance"
}
]'
```
### Example Output:
```
$ ./telegraf --config telegraf.conf --input-filter cloudwatch --test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```

@ -1,56 +0,0 @@
# Conntrack Input Plugin
Collects stats from Netfilter's conntrack-tools.
The conntrack-tools provide a mechanism for tracking various aspects of
network connections as they are processed by netfilter. At runtime,
conntrack exposes many of those connection statistics within /proc/sys/net.
Depending on your kernel version, these files can be found in either
/proc/sys/net/ipv4/netfilter or /proc/sys/net/netfilter and will be
prefixed with either ip_ or nf_. This plugin reads the files specified
in its configuration and publishes each one as a field, with the prefix
normalized to ip_.
In order to simplify configuration in a heterogeneous environment, a superset
of directories and filenames can be specified. Any locations that don't exist
will be ignored.
For more information on conntrack-tools, see the
[Netfilter Documentation](http://conntrack-tools.netfilter.org/).
### Configuration:
```toml
# Collects conntrack stats from the configured directories and files.
[[inputs.conntrack]]
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
```
### Measurements & Fields:
- conntrack
- ip_conntrack_count (int, count): the number of entries in the conntrack table
- ip_conntrack_max (int, size): the max capacity of the conntrack table
### Tags:
This input does not use tags.
### Example Output:
```
$ ./telegraf --config telegraf.conf --input-filter conntrack --test
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```

@ -1,67 +0,0 @@
# Consul Input Plugin
This plugin will collect statistics about all health checks registered in
Consul. It uses the [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It does not report Consul's
[telemetry](https://www.consul.io/docs/agent/telemetry.html), but Consul can
already report those stats via the StatsD protocol if needed.
### Configuration:
```toml
# Gather health check statuses from services registered in Consul
[[inputs.consul]]
## Consul server address
# address = "localhost:8500"
## URI scheme for the Consul server, one of "http", "https"
# scheme = "http"
## ACL token used in every request
# token = ""
## HTTP Basic Authentication username and password.
# username = ""
# password = ""
## Data center to query the health checks from
# datacenter = ""
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
## Consul checks' tag splitting
  # When tags are formatted like "key:value" with ":" as a delimiter, they
  # will be split and reported as proper key:value pairs in Telegraf
# tag_delimiter = ":"
```
### Metrics:
- consul_health_checks
- tags:
- node (node that check/service is registered on)
- service_name
- check_id
- fields:
- check_name
- service_id
- status
- passing (integer)
- critical (integer)
- warning (integer)
`passing`, `critical`, and `warning` are integer representations of the health
check state. A value of `1` means the health check was in that state at the
time of the sample.
### Example Output:
```
consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
```

Some files were not shown because too many files have changed in this diff.