feat(pkger): add telegraf as a valid resource to the pkger parser

pull/16101/head
Johnny Steenbergen 2019-12-03 10:22:59 -08:00 committed by Johnny Steenbergen
parent d0a336fbf8
commit 996c88e953
5 changed files with 260 additions and 5 deletions

View File

@ -300,11 +300,12 @@ func (d DiffVariable) hasConflict() bool {
// Summary is a definition of all the resources that have or
// will be created from a pkg.
type Summary struct {
	Buckets         []SummaryBucket       `json:"buckets"`
	Dashboards      []SummaryDashboard    `json:"dashboards"`
	Labels          []SummaryLabel        `json:"labels"`
	LabelMappings   []SummaryLabelMapping `json:"labelMappings"`
	TelegrafConfigs []SummaryTelegraf     `json:"telegrafConfigs"`
	Variables       []SummaryVariable     `json:"variables"`
}
// SummaryBucket provides a summary of a pkg bucket.
@ -381,6 +382,12 @@ type SummaryLabelMapping struct {
influxdb.LabelMapping
}
// SummaryTelegraf provides a summary of a pkg telegraf config.
type SummaryTelegraf struct {
	influxdb.TelegrafConfig
	// LabelAssociations are the labels this pkg associates with the
	// telegraf config (see telegraf.summarize).
	LabelAssociations []influxdb.Label `json:"labelAssociations"`
}
// SummaryVariable provides a summary of a pkg variable.
type SummaryVariable struct {
influxdb.Variable
@ -704,6 +711,31 @@ func (s sortedLogos) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// fieldTelegrafConfig is the resource field key that carries the raw
// TOML body for a Telegraf resource.
const fieldTelegrafConfig = "config"
// telegraf is the pkg parser's in-memory representation of a Telegraf
// resource: the decoded config plus its nested label associations.
type telegraf struct {
	config influxdb.TelegrafConfig
	// labels is kept sorted by graphTelegrafs (via sort.Sort).
	labels sortedLogos
}
// Name returns the name of the underlying telegraf config.
func (t *telegraf) Name() string {
	name := t.config.Name
	return name
}
// ResourceType returns the influxdb resource type a telegraf config
// maps to, used when recording label mappings.
func (t *telegraf) ResourceType() influxdb.ResourceType {
	return influxdb.TelegrafsResourceType
}
// summarize converts the parsed telegraf resource into its Summary
// form, including the influxdb representation of its labels.
func (t *telegraf) summarize() SummaryTelegraf {
	sum := SummaryTelegraf{
		TelegrafConfig: t.config,
	}
	sum.LabelAssociations = toInfluxLabels(t.labels...)
	return sum
}
const (
fieldArgTypeConstant = "constant"
fieldArgTypeMap = "map"

View File

@ -12,6 +12,7 @@ import (
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/influxdata/influxdb"
"gopkg.in/yaml.v3"
)
@ -136,6 +137,7 @@ type Pkg struct {
mBuckets map[string]*bucket
mDashboards []*dashboard
mVariables map[string]*variable
mTelegrafs []*telegraf
isVerified bool // dry run has verified pkg resources with existing resources
isParsed bool // indicates the pkg has been parsed and all resources graphed accordingly
@ -167,6 +169,10 @@ func (p *Pkg) Summary() Summary {
})
}
for _, t := range p.telegrafs() {
sum.TelegrafConfigs = append(sum.TelegrafConfigs, t.summarize())
}
for _, v := range p.variables() {
sum.Variables = append(sum.Variables, v.summarize())
}
@ -253,6 +259,12 @@ func (p *Pkg) dashboards() []*dashboard {
return dashes
}
// telegrafs returns the pkg's telegraf configs sorted by name.
//
// The sort operates on a copy so the parse-ordered p.mTelegrafs slice
// is left untouched; the original `p.mTelegrafs[:]` aliased the backing
// array, silently reordering it — inconsistent with variables(), which
// builds a fresh slice before returning.
func (p *Pkg) telegrafs() []*telegraf {
	teles := make([]*telegraf, len(p.mTelegrafs))
	copy(teles, p.mTelegrafs)
	sort.Slice(teles, func(i, j int) bool { return teles[i].Name() < teles[j].Name() })
	return teles
}
func (p *Pkg) variables() []*variable {
vars := make([]*variable, 0, len(p.mVariables))
for _, v := range p.mVariables {
@ -369,6 +381,7 @@ func (p *Pkg) graphResources() error {
p.graphVariables,
p.graphBuckets,
p.graphDashboards,
p.graphTelegrafs,
}
var pErr parseErr
@ -518,6 +531,33 @@ func (p *Pkg) graphVariables() error {
})
}
// graphTelegrafs parses every Telegraf resource in the pkg spec,
// resolving label associations and decoding the raw TOML config body.
// Parsed configs accumulate on p.mTelegrafs in document order; any
// per-resource validation failures are returned through eachResource.
func (p *Pkg) graphTelegrafs() error {
	p.mTelegrafs = make([]*telegraf, 0)
	// minNameLen of 0: no minimum name length is enforced for telegrafs.
	return p.eachResource(KindTelegraf, 0, func(r Resource) []validationErr {
		tele := new(telegraf)
		// Wire up nested labels and register the reverse mapping
		// (label -> this telegraf) for later summary/diff use.
		failures := p.parseNestedLabels(r, func(l *label) error {
			tele.labels = append(tele.labels, l)
			p.mLabels[l.Name()].setMapping(tele, false)
			return nil
		})
		sort.Sort(tele.labels)
		// The TOML body lives under the "config" field; a decode error
		// becomes a validation failure on that field rather than
		// aborting the whole parse.
		cfgBytes := []byte(r.stringShort(fieldTelegrafConfig))
		if err := toml.Unmarshal(cfgBytes, &tele.config); err != nil {
			failures = append(failures, validationErr{
				Field: fieldTelegrafConfig,
				Msg:   err.Error(),
			})
		}
		// Resource metadata wins: overwrite whatever name/description
		// the TOML body may have carried.
		tele.config.Name = r.Name()
		tele.config.Description = r.stringShort(fieldDescription)
		p.mTelegrafs = append(p.mTelegrafs, tele)
		return failures
	})
}
func (p *Pkg) eachResource(resourceKind Kind, minNameLen int, fn func(r Resource) []validationErr) error {
var pErr parseErr
for i, r := range p.Spec.Resources {

View File

@ -2699,6 +2699,48 @@ spec:
})
})
t.Run("pkg with telegraf and label associations", func(t *testing.T) {
t.Run("with valid fields", func(t *testing.T) {
testfileRunner(t, "testdata/telegraf", func(t *testing.T, pkg *Pkg) {
sum := pkg.Summary()
require.Len(t, sum.TelegrafConfigs, 1)
actual := sum.TelegrafConfigs[0]
assert.Equal(t, "first_tele_config", actual.Name)
assert.Equal(t, "desc", actual.Description)
require.Len(t, actual.LabelAssociations, 1)
assert.Equal(t, "label_1", actual.LabelAssociations[0].Name)
})
})
t.Run("handles bad config", func(t *testing.T) {
tests := []testPkgResourceError{
{
name: "config missing",
validationErrs: 1,
valFields: []string{"config"},
pkgStr: `apiVersion: 0.1.0
kind: Package
meta:
pkgName: pkg_name
pkgVersion: 1
description: pack description
spec:
resources:
- kind: Telegraf
name: tele_name
description: desc
`,
},
}
for _, tt := range tests {
testPkgErrors(t, KindTelegraf, tt)
}
})
})
t.Run("pkg with a variable", func(t *testing.T) {
t.Run("with valid fields should produce summary", func(t *testing.T) {
testfileRunner(t, "testdata/variables", func(t *testing.T, pkg *Pkg) {

31
pkger/testdata/telegraf.json vendored Normal file
View File

@ -0,0 +1,31 @@
{
"apiVersion": "0.1.0",
"kind": "Package",
"meta": {
"pkgName": "pkg_name",
"pkgVersion": "1",
"description": "pack description"
},
"spec": {
"resources": [
{
"kind": "Label",
"name": "label_1"
},
{
"kind": "Telegraf",
"name": "first_tele_config",
"description": "desc",
"associations": [
{
"kind": "Label",
"name": "label_1"
}
],
"config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://localhost:9999\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]"
}
]
}
}

110
pkger/testdata/telegraf.yml vendored Normal file
View File

@ -0,0 +1,110 @@
apiVersion: 0.1.0
kind: Package
meta:
pkgName: pkg_name
pkgVersion: 1
description: pack description
spec:
resources:
- kind: Label
name: label_1
- kind: Telegraf
name: first_tele_config
description: desc
associations:
- kind: Label
name: label_1
config: |
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do no set the "host" tag in the telegraf agent.
omit_hostname = false
[[outputs.influxdb_v2]]
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
## urls exp: http://127.0.0.1:9999
urls = ["http://localhost:9999"]
## Token for authentication.
token = "$INFLUX_TOKEN"
## Organization is the name of the organization you wish to write to; must exist.
organization = "rg"
## Destination bucket to write into.
bucket = "rucket_3"
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
[[inputs.diskio]]
[[inputs.mem]]
[[inputs.net]]
[[inputs.processes]]
[[inputs.swap]]
[[inputs.system]]