diff --git a/telegraf.go b/telegraf.go
index fb68529b16..c439239168 100644
--- a/telegraf.go
+++ b/telegraf.go
@@ -232,17 +232,22 @@ func decodePluginRaw(tcd *telegrafConfigDecode) ([]string, string, error) {
 		}
 
 		if !ok {
-			return nil, "", &errors.Error{
-				Code: errors.EInvalid,
-				Op:   op,
-				Msg:  fmt.Sprintf(ErrUnsupportTelegrafPluginName, pr.Name, pr.Type),
+			// Skip validation (and do not create TOML) for unrecognized "input" plugins,
+			// but keep the existing error behavior for unrecognized "output" plugins.
+			if pr.Type == "output" {
+				return nil, "", &errors.Error{
+					Code: errors.EInvalid,
+					Op:   op,
+					Msg:  fmt.Sprintf(ErrUnsupportTelegrafPluginName, pr.Name, pr.Type),
+				}
 			}
+			continue
 		}
 
 		config := tpFn()
 		// if pr.Config if empty, make it a blank obj,
 		// so it will still go to the unmarshalling process to validate.
-		if len(string(pr.Config)) == 0 {
+		if pr.Config == nil || len(pr.Config) == 0 {
 			pr.Config = []byte("{}")
 		}
 
@@ -261,6 +266,7 @@ func decodePluginRaw(tcd *telegrafConfigDecode) ([]string, string, error) {
 		}
 
 		ps += config.TOML()
+
 	}
 
 	return bucket, ps, nil
diff --git a/telegraf/plugins/inputs/cpu.go b/telegraf/plugins/inputs/cpu.go
index 23bba5d793..228564257d 100644
--- a/telegraf/plugins/inputs/cpu.go
+++ b/telegraf/plugins/inputs/cpu.go
@@ -26,9 +26,9 @@ func (c *CPUStats) TOML() string {
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## If true, collect raw CPU time metrics.
+  ## If true, collect raw CPU time metrics
   collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
+  ## If true, compute and report the sum of all non-idle CPU states
   report_active = false
 `, c.PluginName())
 }
diff --git a/telegraf/plugins/inputs/disk.go b/telegraf/plugins/inputs/disk.go
index 8c9b4e1392..4ba3ba7420 100644
--- a/telegraf/plugins/inputs/disk.go
+++ b/telegraf/plugins/inputs/disk.go
@@ -26,6 +26,6 @@ func (d *DiskStats) TOML() string {
   ## Set mount_points will restrict the stats to only the specified mount points.
   # mount_points = ["/"]
   ## Ignore mount points by filesystem type.
-  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
+  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
 `, d.PluginName())
 }
diff --git a/telegraf/plugins/inputs/diskio.go b/telegraf/plugins/inputs/diskio.go
index 19a2ce53d8..195fcefbd0 100644
--- a/telegraf/plugins/inputs/diskio.go
+++ b/telegraf/plugins/inputs/diskio.go
@@ -22,5 +22,30 @@ func (d *DiskIO) UnmarshalTOML(data interface{}) error {
 // TOML encodes to toml string.
 func (d *DiskIO) TOML() string {
 	return fmt.Sprintf(`[[inputs.%s]]
+  ## By default, telegraf will gather stats for all devices including
+  ## disk partitions.
+  ## Setting devices will restrict the stats to the specified devices.
+  # devices = ["sda", "sdb", "vd*"]
+  ## Uncomment the following line if you need disk serial numbers.
+  # skip_serial_number = false
+  #
+  ## On systems which support it, device metadata can be added in the form of
+  ## tags.
+  ## Currently only Linux is supported via udev properties. You can view
+  ## available properties for a device by running:
+  ## 'udevadm info -q property -n /dev/sda'
+  ## Note: Most, but not all, udev properties can be accessed this way. Properties
+  ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
+  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+  #
+  ## Using the same metadata source as device_tags, you can also customize the
+  ## name of the device via templates.
+  ## The 'name_templates' parameter is a list of templates to try to apply to
+  ## the device. The template may contain variables in the form of '$PROPERTY' or
+  ## '${PROPERTY}'. The first template which does not contain any variables not
+  ## present for the device is used as the device name tag.
+  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+  ## the near-meaningless DM-0 name.
+  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
 `, d.PluginName())
 }
diff --git a/telegraf/plugins/inputs/docker.go b/telegraf/plugins/inputs/docker.go
index 7bd855fd26..f7b9dd29ae 100644
--- a/telegraf/plugins/inputs/docker.go
+++ b/telegraf/plugins/inputs/docker.go
@@ -28,44 +28,73 @@ func (d *Docker) UnmarshalTOML(data interface{}) error {
 // TOML encodes to toml string
 func (d *Docker) TOML() string {
-	return fmt.Sprintf(`[[inputs.%s]]
+	return fmt.Sprintf(`[[inputs.%s]]
   ## Docker Endpoint
   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
-  ## exp: unix:///var/run/docker.sock
   endpoint = "%s"
-
+  #
   ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
   gather_services = false
-
+  #
   ## Only collect metrics for these containers, collect all if empty
   container_names = []
-
+  #
+  ## Set the source tag for the metrics to the container ID hostname, e.g. first 12 chars
+  source_tag = false
+  #
   ## Containers to include and exclude. Globs accepted.
   ## Note that an empty array for both will include all containers
   container_name_include = []
   container_name_exclude = []
-
+  #
   ## Container states to include and exclude. Globs accepted.
   ## When empty only containers in the "running" state will be captured.
+  ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+  ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
   # container_state_include = []
   # container_state_exclude = []
-
+  #
   ## Timeout for docker list, info, and stats commands
   timeout = "5s"
-
-  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
-  ## network (eth0, eth1, ...) stats or not
+  #
+  ## Whether to report for each container per-device blkio (8:0, 8:1...),
+  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+  ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
+  ## is honored.
   perdevice = true
-
-  ## Whether to report for each container total blkio and network stats or not
+  #
+  ## Specifies for which classes a per-device metric should be issued
+  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+  # perdevice_include = ["cpu"]
+  #
+  ## Whether to report for each container total blkio and network stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+  ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
+  ## is honored.
   total = false
-
+  #
+  ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+  ## Possible values are 'cpu', 'blkio' and 'network'
+  ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+  ## Please note that this setting has no effect if 'total' is set to 'false'
+  # total_include = ["cpu", "blkio", "network"]
+  #
   ## Which environment variables should we use as a tag
   ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+  #
   ## docker labels to include and exclude as tags. Globs accepted.
   ## Note that an empty array for both will include all labels as tags
   docker_label_include = []
   docker_label_exclude = []
+  #
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `, d.PluginName(), d.Endpoint)
 }
diff --git a/telegraf/plugins/inputs/file.go b/telegraf/plugins/inputs/file.go
index 705f598900..00090d0a99 100644
--- a/telegraf/plugins/inputs/file.go
+++ b/telegraf/plugins/inputs/file.go
@@ -40,15 +40,24 @@ func (f *File) TOML() string {
 	for k, v := range f.Files {
 		s[k] = strconv.Quote(v)
 	}
-	return fmt.Sprintf(`[[inputs.%s]]
-  ## Files to parse each interval.
-  ## These accept standard unix glob matching rules, but with the addition of
-  ## ** as a "super asterisk". ie:
-  ## /var/log/**.log -> recursively find all .log files in /var/log
-  ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
-  ## /var/log/apache.log -> only read the apache log file
+	return fmt.Sprintf(`[[inputs.%s]]
+  ## Files to parse each interval. Accept standard unix glob matching rules,
+  ## as well as ** to match recursive files and directories.
   files = [%s]
+
+  ## Name a tag containing the name of the file the data was parsed from. Leave empty
+  ## to disable.
+  # file_tag = ""
+
+  ## Character encoding to use when interpreting the file contents. Invalid
+  ## characters are replaced using the unicode replacement character. When set
+  ## to the empty string the data is not decoded to text.
+  ## ex: character_encoding = "utf-8"
+  ## character_encoding = "utf-16le"
+  ## character_encoding = "utf-16be"
+  ## character_encoding = ""
+  # character_encoding = ""
+
   ## The dataformat to be read from files
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
diff --git a/telegraf/plugins/inputs/inputs_test.go b/telegraf/plugins/inputs/inputs_test.go
index cf42ed3241..452ed15fe6 100644
--- a/telegraf/plugins/inputs/inputs_test.go
+++ b/telegraf/plugins/inputs/inputs_test.go
@@ -36,9 +36,9 @@ func TestEncodeTOML(t *testing.T) {
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## If true, collect raw CPU time metrics.
+  ## If true, collect raw CPU time metrics
   collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
+  ## If true, compute and report the sum of all non-idle CPU states
   report_active = false
 `,
 		&DiskStats{}: `[[inputs.disk]]
@@ -46,71 +46,158 @@ func TestEncodeTOML(t *testing.T) {
   ## Set mount_points will restrict the stats to only the specified mount points.
   # mount_points = ["/"]
   ## Ignore mount points by filesystem type.
- ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] `, - &DiskIO{}: "[[inputs.diskio]]\n", - &Docker{}: `[[inputs.docker]] + &DiskIO{}: `[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb", "vd*"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] +`, + &Docker{}: `[[inputs.docker]] ## Docker Endpoint ## To use TCP, set endpoint = "tcp://[ip]:[port]" ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - ## exp: unix:///var/run/docker.sock endpoint = "" - + # ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) gather_services = false - + # ## Only collect metrics for these containers, collect all if empty container_names = [] - + # + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + # ## Containers to include and exclude. Globs accepted. ## Note that an empty array for both will include all containers container_name_include = [] container_name_exclude = [] - + # ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. + ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] + ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] # container_state_include = [] # container_state_exclude = [] - + # ## Timeout for docker list, info, and stats commands timeout = "5s" - - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not + # + ## Whether to report for each container per-device blkio (8:0, 8:1...), + ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## is honored. 
   perdevice = true
-
-  ## Whether to report for each container total blkio and network stats or not
+  #
+  ## Specifies for which classes a per-device metric should be issued
+  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+  # perdevice_include = ["cpu"]
+  #
+  ## Whether to report for each container total blkio and network stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+  ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
+  ## is honored.
   total = false
-
+  #
+  ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+  ## Possible values are 'cpu', 'blkio' and 'network'
+  ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+  ## Please note that this setting has no effect if 'total' is set to 'false'
+  # total_include = ["cpu", "blkio", "network"]
+  #
   ## Which environment variables should we use as a tag
   ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+  #
   ## docker labels to include and exclude as tags. Globs accepted.
   ## Note that an empty array for both will include all labels as tags
   docker_label_include = []
   docker_label_exclude = []
+  #
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `,
-		&File{}: `[[inputs.file]]
-  ## Files to parse each interval.
-  ## These accept standard unix glob matching rules, but with the addition of
-  ## ** as a "super asterisk". ie:
-  ## /var/log/**.log -> recursively find all .log files in /var/log
-  ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
-  ## /var/log/apache.log -> only read the apache log file
+		&File{}: `[[inputs.file]]
+  ## Files to parse each interval. Accept standard unix glob matching rules,
+  ## as well as ** to match recursive files and directories.
   files = []
+
+  ## Name a tag containing the name of the file the data was parsed from. Leave empty
+  ## to disable.
+  # file_tag = ""
+
+  ## Character encoding to use when interpreting the file contents. Invalid
+  ## characters are replaced using the unicode replacement character. When set
+  ## to the empty string the data is not decoded to text.
+  ## ex: character_encoding = "utf-8"
+  ## character_encoding = "utf-16le"
+  ## character_encoding = "utf-16be"
+  ## character_encoding = ""
+  # character_encoding = ""
+
   ## The dataformat to be read from files
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
 `,
-		&Kernel{}: "[[inputs.kernel]]\n",
+		&Kernel{}: `[[inputs.kernel]]
+  # no configuration
+`,
 		&Kubernetes{}: `[[inputs.kubernetes]]
   ## URL for the kubelet
-  ## exp: http://1.1.1.1:10255
-  url = ""
+  url = ""
+
+  ## Use bearer token for authorization. ('bearer_token' takes priority)
+  ## If both of these are empty, we'll use the default serviceaccount:
+  ## at: /run/secrets/kubernetes.io/serviceaccount/token
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## Pod labels to be added as tags. An empty array for both include and
+  ## exclude will include all labels.
+  # label_include = []
+  # label_exclude = ["*"]
+
+  ## Set response_timeout (default 5 seconds)
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = /path/to/cafile
+  # tls_cert = /path/to/certfile
+  # tls_key = /path/to/keyfile
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `,
-		&LogParserPlugin{}: `[[inputs.logparser]]
+		&LogParserPlugin{}: `[[inputs.logparser]]
   ## Log files to parse.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
@@ -123,8 +210,10 @@ func TestEncodeTOML(t *testing.T) {
   ## while telegraf is running (and that match the "files" globs) will always
   ## be read from the beginning.
   from_beginning = false
+
   ## Method used to watch for file updates. Can be either "inotify" or "poll".
   # watch_method = "inotify"
+
   ## Parse logstash-style "grok" patterns:
   [inputs.logparser.grok]
     ## This is a list of patterns to check the given log file(s) for.
@@ -134,11 +223,48 @@ func TestEncodeTOML(t *testing.T) {
     ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
     ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
     patterns = ["%{COMBINED_LOG_FORMAT}"]
+
     ## Name of the outputted measurement name.
     measurement = "apache_access_log"
+
+    ## Full path(s) to custom pattern files.
+    custom_pattern_files = []
+
+    ## Custom patterns can also be defined here. Put one pattern per line.
+    custom_patterns = '''
+    '''
+
+    ## Timezone allows you to provide an override for timestamps that
+    ## don't already include an offset
+    ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+    ##
+    ## Default: "" which renders UTC
+    ## Options are as follows:
+    ## 1. Local -- interpret based on machine localtime
+    ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+    ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+    # timezone = "Canada/Eastern"
+
+    ## When set to "disable", timestamp will not be incremented if there is a
+    ## duplicate.
+    # unique_timestamp = "auto"
+`,
+		&MemStats{}: `[[inputs.mem]]
+  # no configuration
+`,
+		&NetIOStats{}: `[[inputs.net]]
+  ## By default, telegraf gathers stats from any up interface (excluding loopback)
+  ## Setting interfaces will tell it to gather these explicit interfaces,
+  ## regardless of status.
+  ##
+  # interfaces = ["eth0"]
+  ##
+  ## On linux systems telegraf also collects protocol stats.
+  ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+  ##
+  # ignore_protocol_stats = false
+  ##
 `,
-		&MemStats{}: "[[inputs.mem]]\n",
-		&NetIOStats{}: "[[inputs.net]]\n",
 		&NetResponse{}: `[[inputs.net_response]]
   ## Protocol, must be "tcp" or "udp"
   ## NOTE: because the "udp" protocol does not respond to requests, it requires
@@ -146,20 +272,152 @@ func TestEncodeTOML(t *testing.T) {
   protocol = "tcp"
   ## Server address (default localhost)
   address = "localhost:80"
+
+  ## Set timeout
+  # timeout = "1s"
+
+  ## Set read timeout (only used if expecting a response)
+  # read_timeout = "1s"
+
+  ## The following options are required for UDP checks. For TCP, they are
+  ## optional. The plugin will send the given string to the server and then
+  ## expect to receive the given 'expect' string back.
+  ## string sent to the server
+  # send = "ssh"
+  ## expected string in answer
+  # expect = "ssh"
+
+  ## Uncomment to remove deprecated fields
+  # fielddrop = ["result_type", "string_found"]
 `,
 		&Nginx{}: `[[inputs.nginx]]
   # An array of Nginx stub_status URI to gather stats.
-  # exp http://localhost/server_status
   urls = []
+
+  ## Optional TLS Config
+  tls_ca = "/etc/telegraf/ca.pem"
+  tls_cert = "/etc/telegraf/cert.cer"
+  tls_key = "/etc/telegraf/key.key"
+  ## Use TLS but skip chain & host verification
+  insecure_skip_verify = false
+
+  # HTTP response timeout (default: 5s)
+  response_timeout = "5s"
+`,
+		&Processes{}: `[[inputs.processes]]
+  # no configuration
 `,
-		&Processes{}: "[[inputs.processes]]\n",
 		&Procstat{}: `[[inputs.procstat]]
+  ## PID file to monitor process
+  pid_file = "/var/run/nginx.pid"
   ## executable name (ie, pgrep <exe>)
-  exe = ""
+  # exe = ""
+  ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+  # pattern = "nginx"
+  ## user as argument for pgrep (ie, pgrep -u <user>)
+  # user = "nginx"
+  ## Systemd unit name
+  # systemd_unit = "nginx.service"
+  ## CGroup name or path
+  # cgroup = "systemd/system.slice/nginx.service"
+
+  ## Windows service name
+  # win_service = ""
+
+  ## override for process_name
+  ## This is optional; default is sourced from /proc/<pid>/status
+  # process_name = "bar"
+
+  ## Field name prefix
+  # prefix = ""
+
+  ## When true add the full cmdline as a tag.
+  # cmdline_tag = false
+
+  ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
+  # mode = "irix"
+
+  ## Add the PID as a tag instead of as a field. When collecting multiple
+  ## processes with otherwise matching tags this setting should be enabled to
+  ## ensure each process has a unique identity.
+  ##
+  ## Enabling this option may result in a large number of series, especially
+  ## when processes have a short lifetime.
+  # pid_tag = false
+
+  ## Method to use when finding process IDs. Can be one of 'pgrep', or
+  ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+  ## the native finder performs the search directly in a manner dependent on the
+  ## platform. Default is 'pgrep'
+  # pid_finder = "pgrep"
 `,
-		&Prometheus{}: `[[inputs.prometheus]]
+		&Prometheus{}: `[[inputs.prometheus]]
   ## An array of urls to scrape metrics from.
   urls = []
+
+  ## Metric version controls the mapping from Prometheus metrics into
+  ## Telegraf metrics. When using the prometheus_client output, use the same
+  ## value in both plugins to ensure metrics are round-tripped without
+  ## modification.
+  ##
+  ## example: metric_version = 1;
+  ## metric_version = 2; recommended version
+  # metric_version = 1
+
+  ## Url tag name (tag containing scraped url. optional, default is "url")
+  # url_tag = "url"
+
+  ## An array of Kubernetes services to scrape metrics from.
+  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+
+  ## Kubernetes config file to create client from.
+  # kube_config = "/path/to/kubernetes.config"
+
+  ## Scrape Kubernetes pods for the following prometheus annotations:
+  ## - prometheus.io/scrape: Enable scraping for this pod
+  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+  ## set this to 'https' & most likely set the tls config.
+  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+  ## - prometheus.io/port: If port is not 9102 use this annotation
+  # monitor_kubernetes_pods = true
+  ## Get the list of pods to scrape with either the scope of
+  ## - cluster: the kubernetes watch api (default, no need to specify)
+  ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+  # pod_scrape_scope = "cluster"
+  ## Only for node scrape scope: node IP of the node that telegraf is running on.
+  ## Either this config or the environment variable NODE_IP must be set.
+  # node_ip = "10.180.1.1"
+  # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+  # ## Default is 60 seconds.
+  # # pod_scrape_interval = 60
+  ## Restricts Kubernetes monitoring to a single namespace
+  ## ex: monitor_kubernetes_pods_namespace = "default"
+  # monitor_kubernetes_pods_namespace = ""
+  # label selector to target pods which have the label
+  # kubernetes_label_selector = "env=dev,app=nginx"
+  # field selector to target pods
+  # eg. To scrape pods on a specific node
+  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+
+  ## Use bearer token for authorization. ('bearer_token' takes priority)
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## HTTP Basic Authentication username and password. ('bearer_token' and
+  ## 'bearer_token_string' take priority)
+  # username = ""
+  # password = ""
+
+  ## Specify timeout duration for slower prometheus clients (default is 3s)
+  # response_timeout = "3s"
+
+  ## Optional TLS Config
+  # tls_ca = /path/to/cafile
+  # tls_cert = /path/to/certfile
+  # tls_key = /path/to/keyfile
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `,
 		&Redis{}: `[[inputs.redis]]
   ## specify servers via a url matching:
@@ -173,41 +431,146 @@ func TestEncodeTOML(t *testing.T) {
   ## If no port is specified, 6379 is used
   servers = []
 
+  ## Optional. Specify redis commands to retrieve values
+  # [[inputs.redis.commands]]
+  # # The command to run where each argument is a separate element
+  # command = ["get", "sample-key"]
+  # # The field to store the result in
+  # field = "sample-key-value"
+  # # The type of the result
+  # # Can be "string", "integer", or "float"
+  # type = "string"
+
   ## specify server password
   # password = ""
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = true
+`,
+		&SwapStats{}: `[[inputs.swap]]
+  # no configuration
 `,
-		&SwapStats{}: "[[inputs.swap]]\n",
 		&Syslog{}: `[[inputs.syslog]]
   ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
   ## Protocol, address and port to host the syslog receiver.
   ## If no host is specified, then localhost is used.
   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
   server = ""
+
+  ## TLS Config
+  # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Period between keep alive probes.
+  ## 0 disables keep alive probes.
+  ## Defaults to the OS configuration.
+  ## Only applies to stream sockets (e.g. TCP).
+  # keep_alive_period = "5m"
+
+  ## Maximum number of concurrent connections (default = 0).
+  ## 0 means unlimited.
+  ## Only applies to stream sockets (e.g. TCP).
+  # max_connections = 1024
+
+  ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+  ## 0 means unlimited.
+  # read_timeout = "5s"
+
+  ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+  ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+  ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+  ## Must be one of "octet-counting", "non-transparent".
+  # framing = "octet-counting"
+
+  ## The trailer to be expected in case of non-transparent framing (default = "LF").
+  ## Must be one of "LF", or "NUL".
+  # trailer = "LF"
+
+  ## Whether to parse in best effort mode or not (default = false).
+  ## By default best effort parsing is off.
+  # best_effort = false
+
+  ## Character to prepend to SD-PARAMs (default = "_").
+  ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+  ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+  ## For each combination a field is created.
+  ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+  # sdparam_separator = "_"
 `,
-		&SystemStats{}: "[[inputs.system]]\n",
-		&Tail{}: `[[inputs.tail]]
-  ## files to tail.
+		&SystemStats{}: `[[inputs.system]]
+  ## Uncomment to remove deprecated metrics.
+  # fielddrop = ["uptime_format"]
+`,
+		&Tail{}: `[[inputs.tail]]
+  ## File names or a pattern to tail.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
   ## "/var/log/**.log" -> recursively find all .log files in /var/log
   ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
   ## "/var/log/apache.log" -> just tail the apache log file
-  ##
+  ## "/var/log/log[!1-2]* -> tail files without 1-2
+  ## "/var/log/log[^1-2]* -> identical behavior as above
   ## See https://github.com/gobwas/glob for more examples
   ##
   files = []
+
   ## Read file from beginning.
-  from_beginning = false
+  # from_beginning = false
+
   ## Whether file is a named pipe
-  pipe = false
+  # pipe = false
+
   ## Method used to watch for file updates. Can be either "inotify" or "poll".
   # watch_method = "inotify"
+
+  ## Maximum lines of the file to process that have not yet be written by the
+  ## output. For best throughput set based on the number of metrics on each
+  ## line and the size of the output's metric_batch_size.
+  # max_undelivered_lines = 1000
+
+  ## Character encoding to use when interpreting the file contents. Invalid
+  ## characters are replaced using the unicode replacement character. When set
+  ## to the empty string the data is not decoded to text.
+  ## ex: character_encoding = "utf-8"
+  ## character_encoding = "utf-16le"
+  ## character_encoding = "utf-16be"
+  ## character_encoding = ""
+  # character_encoding = ""
+
   ## Data format to consume.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
+
+  ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+  # path_tag = "path"
+
+  ## multiline parser/codec
+  ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+  #[inputs.tail.multiline]
+    ## The pattern should be a regexp which matches what you believe to be an
+    ## indicator that the field is part of an event consisting of multiple lines of log data.
+    #pattern = "^\s"
+
+    ## This field must be either "previous" or "next".
+    ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+    ## whereas "next" indicates that the line belongs to the next one.
+    #match_which_line = "previous"
+
+    ## The invert_match field can be true or false (defaults to false).
+    ## If true, a message not matching the pattern will constitute a match of the multiline
+    ## filter and the what will be applied. (vice-versa is also true)
+    #invert_match = false
+
+    ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+    ## is found to start a new event. The default timeout is 5s.
+    #timeout = 5s
 `,
 		},
 	},
@@ -216,60 +579,98 @@ func TestEncodeTOML(t *testing.T) {
 			plugins: map[telegrafPluginConfig]string{
 				&Docker{
 					Endpoint: "unix:///var/run/docker.sock",
-				}: `[[inputs.docker]]
+				}: `[[inputs.docker]]
   ## Docker Endpoint
   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
-  ## exp: unix:///var/run/docker.sock
   endpoint = "unix:///var/run/docker.sock"
-
+  #
   ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
   gather_services = false
-
+  #
   ## Only collect metrics for these containers, collect all if empty
   container_names = []
-
+  #
+  ## Set the source tag for the metrics to the container ID hostname, e.g. first 12 chars
+  source_tag = false
+  #
   ## Containers to include and exclude. Globs accepted.
   ## Note that an empty array for both will include all containers
   container_name_include = []
   container_name_exclude = []
-
+  #
   ## Container states to include and exclude. Globs accepted.
   ## When empty only containers in the "running" state will be captured.
+  ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+  ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
   # container_state_include = []
   # container_state_exclude = []
-
+  #
   ## Timeout for docker list, info, and stats commands
   timeout = "5s"
-
-  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
-  ## network (eth0, eth1, ...) stats or not
+  #
+  ## Whether to report for each container per-device blkio (8:0, 8:1...),
+  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+  ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
+  ## is honored.
   perdevice = true
-
-  ## Whether to report for each container total blkio and network stats or not
+  #
+  ## Specifies for which classes a per-device metric should be issued
+  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+  # perdevice_include = ["cpu"]
+  #
+  ## Whether to report for each container total blkio and network stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+  ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
+  ## is honored.
   total = false
-
+  #
+  ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+  ## Possible values are 'cpu', 'blkio' and 'network'
+  ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+  ## Please note that this setting has no effect if 'total' is set to 'false'
+  # total_include = ["cpu", "blkio", "network"]
+  #
   ## Which environment variables should we use as a tag
   ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+  #
   ## docker labels to include and exclude as tags. Globs accepted.
   ## Note that an empty array for both will include all labels as tags
   docker_label_include = []
   docker_label_exclude = []
+  #
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `,
 				&File{
 					Files: []string{
 						"/var/log/**.log",
 						"/var/log/apache.log",
 					},
-				}: `[[inputs.file]]
-  ## Files to parse each interval.
-  ## These accept standard unix glob matching rules, but with the addition of
-  ## ** as a "super asterisk". ie:
-  ## /var/log/**.log -> recursively find all .log files in /var/log
-  ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
-  ## /var/log/apache.log -> only read the apache log file
+				}: `[[inputs.file]]
+  ## Files to parse each interval. Accept standard unix glob matching rules,
+  ## as well as ** to match recursive files and directories.
   files = ["/var/log/**.log", "/var/log/apache.log"]
+
+  ## Name a tag containing the name of the file the data was parsed from. Leave empty
+  ## to disable.
+  # file_tag = ""
+
+  ## Character encoding to use when interpreting the file contents. Invalid
+  ## characters are replaced using the unicode replacement character. When set
+  ## to the empty string the data is not decoded to text.
+  ## ex: character_encoding = "utf-8"
+  ## character_encoding = "utf-16le"
+  ## character_encoding = "utf-16be"
+  ## character_encoding = ""
+  # character_encoding = ""
+
   ## The dataformat to be read from files
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
@@ -278,15 +679,36 @@ func TestEncodeTOML(t *testing.T) {
 `,
 				&Kubernetes{URL: "http://1.1.1.1:10255"}: `[[inputs.kubernetes]]
   ## URL for the kubelet
-  ## exp: http://1.1.1.1:10255
-  url = "http://1.1.1.1:10255"
+  url = "http://1.1.1.1:10255"
+
+  ## Use bearer token for authorization. ('bearer_token' takes priority)
+  ## If both of these are empty, we'll use the default serviceaccount:
+  ## at: /run/secrets/kubernetes.io/serviceaccount/token
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## Pod labels to be added as tags. An empty array for both include and
+  ## exclude will include all labels.
+  # label_include = []
+  # label_exclude = ["*"]
+
+  ## Set response_timeout (default 5 seconds)
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = /path/to/cafile
+  # tls_cert = /path/to/certfile
+  # tls_key = /path/to/keyfile
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `,
 				&LogParserPlugin{
 					Files: []string{
 						"/var/log/**.log",
 						"/var/log/apache.log",
 					},
-				}: `[[inputs.logparser]]
+				}: `[[inputs.logparser]]
   ## Log files to parse.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
@@ -299,8 +721,10 @@ func TestEncodeTOML(t *testing.T) {
   ## while telegraf is running (and that match the "files" globs) will always
   ## be read from the beginning.
   from_beginning = false
+
   ## Method used to watch for file updates. Can be either "inotify" or "poll".
   # watch_method = "inotify"
+
   ## Parse logstash-style "grok" patterns:
   [inputs.logparser.grok]
     ## This is a list of patterns to check the given log file(s) for.
@@ -310,8 +734,31 @@ func TestEncodeTOML(t *testing.T) {
     ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
     ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
     patterns = ["%{COMBINED_LOG_FORMAT}"]
+
     ## Name of the outputted measurement name.
     measurement = "apache_access_log"
+
+    ## Full path(s) to custom pattern files.
+    custom_pattern_files = []
+
+    ## Custom patterns can also be defined here. Put one pattern per line.
+    custom_patterns = '''
+    '''
+
+    ## Timezone allows you to provide an override for timestamps that
+    ## don't already include an offset
+    ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+    ##
+    ## Default: "" which renders UTC
+    ## Options are as follows:
+    ## 1. Local -- interpret based on machine localtime
+    ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+    ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+    # timezone = "Canada/Eastern"
+
+    ## When set to "disable", timestamp will not be incremented if there is a
+    ## duplicate.
+    # unique_timestamp = "auto"
 `,
 				&Nginx{
 					URLs: []string{
@@ -320,23 +767,136 @@ func TestEncodeTOML(t *testing.T) {
 					},
 				}: `[[inputs.nginx]]
   # An array of Nginx stub_status URI to gather stats.
-  # exp http://localhost/server_status
   urls = ["http://localhost/server_status", "http://192.168.1.1/server_status"]
+
+  ## Optional TLS Config
+  tls_ca = "/etc/telegraf/ca.pem"
+  tls_cert = "/etc/telegraf/cert.cer"
+  tls_key = "/etc/telegraf/key.key"
+  ## Use TLS but skip chain & host verification
+  insecure_skip_verify = false
+
+  # HTTP response timeout (default: 5s)
+  response_timeout = "5s"
 `,
 				&Procstat{
 					Exe: "finder",
 				}: `[[inputs.procstat]]
+  ## PID file to monitor process
+  pid_file = "/var/run/nginx.pid"
   ## executable name (ie, pgrep <exe>)
-  exe = "finder"
+  # exe = "finder"
+  ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+  # pattern = "nginx"
+  ## user as argument for pgrep (ie, pgrep -u <user>)
+  # user = "nginx"
+  ## Systemd unit name
+  # systemd_unit = "nginx.service"
+  ## CGroup name or path
+  # cgroup = "systemd/system.slice/nginx.service"
+
+  ## Windows service name
+  # win_service = ""
+
+  ## override for process_name
+  ## This is optional; default is sourced from /proc/<pid>/status
+  # process_name = "bar"
+
+  ## Field name prefix
+  # prefix = ""
+
+  ## When true add the full cmdline as a tag.
+  # cmdline_tag = false
+
+  ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
+  # mode = "irix"
+
+  ## Add the PID as a tag instead of as a field. When collecting multiple
+  ## processes with otherwise matching tags this setting should be enabled to
+  ## ensure each process has a unique identity.
+  ##
+  ## Enabling this option may result in a large number of series, especially
+  ## when processes have a short lifetime.
+  # pid_tag = false
+
+  ## Method to use when finding process IDs. Can be one of 'pgrep', or
+  ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+  ## the native finder performs the search directly in a manner dependent on the
+  ## platform. Default is 'pgrep'
+  # pid_finder = "pgrep"
 `,
 				&Prometheus{
 					URLs: []string{
 						"http://192.168.2.1:9090",
 						"http://192.168.2.2:9090",
 					},
-				}: `[[inputs.prometheus]]
+				}: `[[inputs.prometheus]]
   ## An array of urls to scrape metrics from.
   urls = ["http://192.168.2.1:9090", "http://192.168.2.2:9090"]
+
+  ## Metric version controls the mapping from Prometheus metrics into
+  ## Telegraf metrics. When using the prometheus_client output, use the same
+  ## value in both plugins to ensure metrics are round-tripped without
+  ## modification.
+  ##
+  ## example: metric_version = 1;
+  ## metric_version = 2; recommended version
+  # metric_version = 1
+
+  ## Url tag name (tag containing scraped url. optional, default is "url")
+  # url_tag = "url"
+
+  ## An array of Kubernetes services to scrape metrics from.
+  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+
+  ## Kubernetes config file to create client from.
+  # kube_config = "/path/to/kubernetes.config"
+
+  ## Scrape Kubernetes pods for the following prometheus annotations:
+  ## - prometheus.io/scrape: Enable scraping for this pod
+  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+  ## set this to 'https' & most likely set the tls config.
+  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+  ## - prometheus.io/port: If port is not 9102 use this annotation
+  # monitor_kubernetes_pods = true
+  ## Get the list of pods to scrape with either the scope of
+  ## - cluster: the kubernetes watch api (default, no need to specify)
+  ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+  # pod_scrape_scope = "cluster"
+  ## Only for node scrape scope: node IP of the node that telegraf is running on.
+  ## Either this config or the environment variable NODE_IP must be set.
+  # node_ip = "10.180.1.1"
+  # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+  # ## Default is 60 seconds.
+  # # pod_scrape_interval = 60
+  ## Restricts Kubernetes monitoring to a single namespace
+  ## ex: monitor_kubernetes_pods_namespace = "default"
+  # monitor_kubernetes_pods_namespace = ""
+  # label selector to target pods which have the label
+  # kubernetes_label_selector = "env=dev,app=nginx"
+  # field selector to target pods
+  # eg. To scrape pods on a specific node
+  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+
+  ## Use bearer token for authorization. ('bearer_token' takes priority)
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## HTTP Basic Authentication username and password. ('bearer_token' and
+  ## 'bearer_token_string' take priority)
+  # username = ""
+  # password = ""
+
+  ## Specify timeout duration for slower prometheus clients (default is 3s)
+  # response_timeout = "3s"
+
+  ## Optional TLS Config
+  # tls_ca = /path/to/cafile
+  # tls_cert = /path/to/certfile
+  # tls_key = /path/to/keyfile
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `,
 				&Redis{
 					Servers: []string{
@@ -356,8 +916,25 @@ func TestEncodeTOML(t *testing.T) {
   ## If no port is specified, 6379 is used
   servers = ["tcp://localhost:6379", "unix:///var/run/redis.sock"]
 
+  ## Optional. Specify redis commands to retrieve values
+  # [[inputs.redis.commands]]
+  # # The command to run where each argument is a separate element
+  # command = ["get", "sample-key"]
+  # # The field to store the result in
+  # field = "sample-key-value"
+  # # The type of the result
+  # # Can be "string", "integer", or "float"
+  # type = "string"
+
   ## specify server password
-  password = "somepassword123"
+  # password = "somepassword123"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = true
 `,
 				&Syslog{
 					Address: "tcp://10.0.0.1:6514",
@@ -367,32 +944,115 @@ func TestEncodeTOML(t *testing.T) {
   ## If no host is specified, then localhost is used.
   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
   server = "tcp://10.0.0.1:6514"
+
+  ## TLS Config
+  # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Period between keep alive probes.
+  ## 0 disables keep alive probes.
+  ## Defaults to the OS configuration.
+  ## Only applies to stream sockets (e.g. TCP).
+  # keep_alive_period = "5m"
+
+  ## Maximum number of concurrent connections (default = 0).
+  ## 0 means unlimited.
+  ## Only applies to stream sockets (e.g. TCP).
+  # max_connections = 1024
+
+  ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+  ## 0 means unlimited.
+  # read_timeout = "5s"
+
+  ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+  ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+  ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+  ## Must be one of "octet-counting", "non-transparent".
+  # framing = "octet-counting"
+
+  ## The trailer to be expected in case of non-transparent framing (default = "LF").
+  ## Must be one of "LF", or "NUL".
+  # trailer = "LF"
+
+  ## Whether to parse in best effort mode or not (default = false).
+  ## By default best effort parsing is off.
+  # best_effort = false
+
+  ## Character to prepend to SD-PARAMs (default = "_").
+  ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+  ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+  ## For each combination a field is created.
+  ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+  # sdparam_separator = "_"
 `,
 				&Tail{
 					Files: []string{"/var/log/**.log", "/var/log/apache.log"},
-				}: `[[inputs.tail]]
-  ## files to tail.
+				}: `[[inputs.tail]]
+  ## File names or a pattern to tail.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
   ## "/var/log/**.log" -> recursively find all .log files in /var/log
   ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
   ## "/var/log/apache.log" -> just tail the apache log file
-  ##
+  ## "/var/log/log[!1-2]* -> tail files without 1-2
+  ## "/var/log/log[^1-2]* -> identical behavior as above
   ## See https://github.com/gobwas/glob for more examples
   ##
   files = ["/var/log/**.log", "/var/log/apache.log"]
 
   ## Read file from beginning.
-  from_beginning = false
+  # from_beginning = false
+
   ## Whether file is a named pipe
-  pipe = false
+  # pipe = false
+
   ## Method used to watch for file updates. Can be either "inotify" or "poll".
   # watch_method = "inotify"
+
+  ## Maximum lines of the file to process that have not yet be written by the
+  ## output. For best throughput set based on the number of metrics on each
+  ## line and the size of the output's metric_batch_size.
+  # max_undelivered_lines = 1000
+
+  ## Character encoding to use when interpreting the file contents. Invalid
+  ## characters are replaced using the unicode replacement character. When set
+  ## to the empty string the data is not decoded to text.
+  ## ex: character_encoding = "utf-8"
+  ## character_encoding = "utf-16le"
+  ## character_encoding = "utf-16be"
+  ## character_encoding = ""
+  # character_encoding = ""
+
   ## Data format to consume.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
+
+  ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+  # path_tag = "path"
+
+  ## multiline parser/codec
+  ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+  #[inputs.tail.multiline]
+    ## The pattern should be a regexp which matches what you believe to be an
+    ## indicator that the field is part of an event consisting of multiple lines of log data.
+    #pattern = "^\s"
+
+    ## This field must be either "previous" or "next".
+    ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+    ## whereas "next" indicates that the line belongs to the next one.
+    #match_which_line = "previous"
+
+    ## The invert_match field can be true or false (defaults to false).
+    ## If true, a message not matching the pattern will constitute a match of the multiline
+    ## filter and the what will be applied. (vice-versa is also true)
+    #invert_match = false
+
+    ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+    ## is found to start a new event. The default timeout is 5s.
+    #timeout = 5s
 `,
 			},
 		},
diff --git a/telegraf/plugins/inputs/kernel.go b/telegraf/plugins/inputs/kernel.go
index 835cfb7167..663337f49b 100644
--- a/telegraf/plugins/inputs/kernel.go
+++ b/telegraf/plugins/inputs/kernel.go
@@ -17,6 +17,7 @@ func (k *Kernel) PluginName() string {
 
 // TOML encodes to toml string
 func (k *Kernel) TOML() string {
 	return fmt.Sprintf(`[[inputs.%s]]
+  # no configuration
 `, k.PluginName())
 }
diff --git a/telegraf/plugins/inputs/kubernetes.go b/telegraf/plugins/inputs/kubernetes.go
index db9fabc639..329e025855 100644
--- a/telegraf/plugins/inputs/kubernetes.go
+++ b/telegraf/plugins/inputs/kubernetes.go
@@ -20,8 +20,29 @@ func (k *Kubernetes) PluginName() string {
 func (k *Kubernetes) TOML() string {
 	return fmt.Sprintf(`[[inputs.%s]]
   ## URL for the kubelet
-  ## exp: http://1.1.1.1:10255
-  url = "%s"
+  url = "%s"
+
+  ## Use bearer token for authorization. ('bearer_token' takes priority)
+  ## If both of these are empty, we'll use the default serviceaccount:
+  ## at: /run/secrets/kubernetes.io/serviceaccount/token
+  # bearer_token = "/path/to/bearer/token"
+  ## OR
+  # bearer_token_string = "abc_123"
+
+  ## Pod labels to be added as tags. An empty array for both include and
+  ## exclude will include all labels.
+  # label_include = []
+  # label_exclude = ["*"]
+
+  ## Set response_timeout (default 5 seconds)
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = /path/to/cafile
+  # tls_cert = /path/to/certfile
+  # tls_key = /path/to/keyfile
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
 `, k.PluginName(), k.URL)
 }
diff --git a/telegraf/plugins/inputs/logparser.go b/telegraf/plugins/inputs/logparser.go
index 0977ba8743..367057650a 100644
--- a/telegraf/plugins/inputs/logparser.go
+++ b/telegraf/plugins/inputs/logparser.go
@@ -24,7 +24,7 @@ func (l *LogParserPlugin) TOML() string {
 	for k, v := range l.Files {
 		s[k] = strconv.Quote(v)
 	}
-	return fmt.Sprintf(`[[inputs.%s]]
+	return fmt.Sprintf(`[[inputs.%s]]
   ## Log files to parse.
   ## These accept standard unix glob matching rules, but with the addition of
   ## ** as a "super asterisk". ie:
@@ -37,8 +37,10 @@ func (l *LogParserPlugin) TOML() string {
   ## while telegraf is running (and that match the "files" globs) will always
   ## be read from the beginning.
   from_beginning = false
+
   ## Method used to watch for file updates. Can be either "inotify" or "poll".
   # watch_method = "inotify"
+
   ## Parse logstash-style "grok" patterns:
   [inputs.logparser.grok]
     ## This is a list of patterns to check the given log file(s) for.
@@ -48,8 +50,31 @@ func (l *LogParserPlugin) TOML() string {
     ## %%{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
     ## %%{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
     patterns = ["%%{COMBINED_LOG_FORMAT}"]
+
     ## Name of the outputted measurement name.
     measurement = "apache_access_log"
+
+    ## Full path(s) to custom pattern files.
+    custom_pattern_files = []
+
+    ## Custom patterns can also be defined here. Put one pattern per line.
+    custom_patterns = '''
+    '''
+
+    ## Timezone allows you to provide an override for timestamps that
+    ## don't already include an offset
+    ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+    ##
+    ## Default: "" which renders UTC
+    ## Options are as follows:
+    ## 1. Local -- interpret based on machine localtime
+    ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+    ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+    # timezone = "Canada/Eastern"
+
+    ## When set to "disable", timestamp will not be incremented if there is a
+    ## duplicate.
+    # unique_timestamp = "auto"
 `, l.PluginName(), strings.Join(s, ", "))
 }
diff --git a/telegraf/plugins/inputs/mem.go b/telegraf/plugins/inputs/mem.go
index ae08570d65..2e1620df7e 100644
--- a/telegraf/plugins/inputs/mem.go
+++ b/telegraf/plugins/inputs/mem.go
@@ -17,6 +17,7 @@ func (m *MemStats) PluginName() string {
 
 // TOML encodes to toml string
 func (m *MemStats) TOML() string {
 	return fmt.Sprintf(`[[inputs.%s]]
+  # no configuration
 `, m.PluginName())
 }
diff --git a/telegraf/plugins/inputs/net.go b/telegraf/plugins/inputs/net.go
index 85bd74c9d4..65f9a8c6e6 100644
--- a/telegraf/plugins/inputs/net.go
+++ b/telegraf/plugins/inputs/net.go
@@ -17,6 +17,17 @@ func (n *NetIOStats) PluginName() string {
 
 // TOML encodes to toml string
 func (n *NetIOStats) TOML() string {
 	return fmt.Sprintf(`[[inputs.%s]]
+  ## By default, telegraf gathers stats from any up interface (excluding loopback)
+  ## Setting interfaces will tell it to gather these explicit interfaces,
+  ## regardless of status.
+  ##
+  # interfaces = ["eth0"]
+  ##
+  ## On linux systems telegraf also collects protocol stats.
+  ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+  ##
+  # ignore_protocol_stats = false
+  ##
 `, n.PluginName())
 }
diff --git a/telegraf/plugins/inputs/net_response.go b/telegraf/plugins/inputs/net_response.go
index cd3e9d879b..cdf42602b9 100644
--- a/telegraf/plugins/inputs/net_response.go
+++ b/telegraf/plugins/inputs/net_response.go
@@ -23,6 +23,23 @@ func (n *NetResponse) TOML() string {
   protocol = "tcp"
   ## Server address (default localhost)
   address = "localhost:80"
+
+  ## Set timeout
+  # timeout = "1s"
+
+  ## Set read timeout (only used if expecting a response)
+  # read_timeout = "1s"
+
+  ## The following options are required for UDP checks. For TCP, they are
+  ## optional. The plugin will send the given string to the server and then
+  ## expect to receive the given 'expect' string back.
+  ## string sent to the server
+  # send = "ssh"
+  ## expected string in answer
+  # expect = "ssh"
+
+  ## Uncomment to remove deprecated fields
+  # fielddrop = ["result_type", "string_found"]
 `, n.PluginName())
 }
diff --git a/telegraf/plugins/inputs/ngnix.go b/telegraf/plugins/inputs/nginx.go
similarity index 78%
rename from telegraf/plugins/inputs/ngnix.go
rename to telegraf/plugins/inputs/nginx.go
index 1929fbccc7..ea8a2dc44c 100644
--- a/telegraf/plugins/inputs/ngnix.go
+++ b/telegraf/plugins/inputs/nginx.go
@@ -26,8 +26,17 @@ func (n *Nginx) TOML() string {
 	}
 	return fmt.Sprintf(`[[inputs.%s]]
   # An array of Nginx stub_status URI to gather stats.
-  # exp http://localhost/server_status
   urls = [%s]
+
+  ## Optional TLS Config
+  tls_ca = "/etc/telegraf/ca.pem"
+  tls_cert = "/etc/telegraf/cert.cer"
+  tls_key = "/etc/telegraf/key.key"
+  ## Use TLS but skip chain & host verification
+  insecure_skip_verify = false
+
+  # HTTP response timeout (default: 5s)
+  response_timeout = "5s"
 `, n.PluginName(), strings.Join(s, ", "))
 }
diff --git a/telegraf/plugins/inputs/processes.go b/telegraf/plugins/inputs/processes.go
index d258964092..f097fa7fab 100644
--- a/telegraf/plugins/inputs/processes.go
+++ b/telegraf/plugins/inputs/processes.go
@@ -17,6 +17,7 @@ func (p *Processes) PluginName() string {
 
 // TOML encodes to toml string
 func (p *Processes) TOML() string {
 	return fmt.Sprintf(`[[inputs.%s]]
+  # no configuration
 `, p.PluginName())
 }
diff --git a/telegraf/plugins/inputs/procstat.go b/telegraf/plugins/inputs/procstat.go
new file mode 100644
index 0000000000..e394a925da
--- /dev/null
+++ b/telegraf/plugins/inputs/procstat.go
@@ -0,0 +1,75 @@
+package inputs
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Procstat is based on telegraf procstat input plugin.
+type Procstat struct {
+	baseInput
+	Exe string `json:"exe"`
+}
+
+// PluginName is based on telegraf plugin name.
+func (p *Procstat) PluginName() string {
+	return "procstat"
+}
+
+// TOML encodes to toml string.
+func (p *Procstat) TOML() string {
+	return fmt.Sprintf(`[[inputs.%s]]
+  ## PID file to monitor process
+  pid_file = "/var/run/nginx.pid"
+  ## executable name (ie, pgrep <exe>)
+  # exe = "%s"
+  ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+  # pattern = "nginx"
+  ## user as argument for pgrep (ie, pgrep -u <user>)
+  # user = "nginx"
+  ## Systemd unit name
+  # systemd_unit = "nginx.service"
+  ## CGroup name or path
+  # cgroup = "systemd/system.slice/nginx.service"
+
+  ## Windows service name
+  # win_service = ""
+
+  ## override for process_name
+  ## This is optional; default is sourced from /proc/<pid>/status
+  # process_name = "bar"
+
+  ## Field name prefix
+  # prefix = ""
+
+  ## When true add the full cmdline as a tag.
+	# cmdline_tag = false
+
+	## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
+	# mode = "irix"
+
+	## Add the PID as a tag instead of as a field. When collecting multiple
+	## processes with otherwise matching tags this setting should be enabled to
+	## ensure each process has a unique identity.
+	##
+	## Enabling this option may result in a large number of series, especially
+	## when processes have a short lifetime.
+	# pid_tag = false
+
+	## Method to use when finding process IDs. Can be one of 'pgrep', or
+	## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+	## the native finder performs the search directly in a manner dependent on the
+	## platform. Default is 'pgrep'
+	# pid_finder = "pgrep"
+`, p.PluginName(), p.Exe)
+}
+
+// UnmarshalTOML decodes the parsed data to the object
+func (p *Procstat) UnmarshalTOML(data interface{}) error {
+	dataOK, ok := data.(map[string]interface{})
+	if !ok {
+		return errors.New("bad exe for procstat input plugin")
+	}
+	p.Exe, _ = dataOK["exe"].(string)
+	return nil
+}
diff --git a/telegraf/plugins/inputs/procstats.go b/telegraf/plugins/inputs/procstats.go
deleted file mode 100644
index c40916144a..0000000000
--- a/telegraf/plugins/inputs/procstats.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package inputs
-
-import (
-	"errors"
-	"fmt"
-)
-
-// Procstat is based on telegraf procstat input plugin.
-type Procstat struct {
-	baseInput
-	Exe string `json:"exe"`
-}
-
-// PluginName is based on telegraf plugin name.
-func (p *Procstat) PluginName() string {
-	return "procstat"
-}
-
-// TOML encodes to toml string.
-func (p *Procstat) TOML() string {
-	return fmt.Sprintf(`[[inputs.%s]]
-	## executable name (ie, pgrep <exe>)
-	exe = "%s"
-`, p.PluginName(), p.Exe)
-}
-
-// UnmarshalTOML decodes the parsed data to the object
-func (p *Procstat) UnmarshalTOML(data interface{}) error {
-	dataOK, ok := data.(map[string]interface{})
-	if !ok {
-		return errors.New("bad exe for procstat input plugin")
-	}
-	p.Exe, _ = dataOK["exe"].(string)
-	return nil
-}
diff --git a/telegraf/plugins/inputs/prometheus.go b/telegraf/plugins/inputs/prometheus.go
index 21225341ce..c00721c1b3 100644
--- a/telegraf/plugins/inputs/prometheus.go
+++ b/telegraf/plugins/inputs/prometheus.go
@@ -24,9 +24,73 @@ func (p *Prometheus) TOML() string {
 for k, v := range p.URLs {
 s[k] = strconv.Quote(v)
 }
- return fmt.Sprintf(`[[inputs.%s]]
+ return fmt.Sprintf(`[[inputs.%s]]
 ## An array of urls to scrape metrics from.
 urls = [%s]
+
+ ## Metric version controls the mapping from Prometheus metrics into
+ ## Telegraf metrics. When using the prometheus_client output, use the same
+ ## value in both plugins to ensure metrics are round-tripped without
+ ## modification.
+ ##
+ ## example: metric_version = 1;
+ ## metric_version = 2; recommended version
+ # metric_version = 1
+
+ ## Url tag name (tag containing scraped url. optional, default is "url")
+ # url_tag = "url"
+
+ ## An array of Kubernetes services to scrape metrics from.
+ # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+
+ ## Kubernetes config file to create client from.
+ # kube_config = "/path/to/kubernetes.config"
+
+ ## Scrape Kubernetes pods for the following prometheus annotations:
+ ## - prometheus.io/scrape: Enable scraping for this pod
+ ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+ ## set this to 'https' & most likely set the tls config.
+ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + ## Get the list of pods to scrape with either the scope of + ## - cluster: the kubernetes watch api (default, no need to specify) + ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. + # pod_scrape_scope = "cluster" + ## Only for node scrape scope: node IP of the node that telegraf is running on. + ## Either this config or the environment variable NODE_IP must be set. + # node_ip = "10.180.1.1" + # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. + # ## Default is 60 seconds. + # # pod_scrape_interval = 60 + ## Restricts Kubernetes monitoring to a single namespace + ## ex: monitor_kubernetes_pods_namespace = "default" + # monitor_kubernetes_pods_namespace = "" + # label selector to target pods which have the label + # kubernetes_label_selector = "env=dev,app=nginx" + # field selector to target pods + # eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## HTTP Basic Authentication username and password. ('bearer_token' and + ## 'bearer_token_string' take priority) + # username = "" + # password = "" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + # response_timeout = "3s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false `, p.PluginName(), strings.Join(s, ", ")) } diff --git a/telegraf/plugins/inputs/redis.go b/telegraf/plugins/inputs/redis.go index 3106c1fc52..3c148a6510 100644 --- a/telegraf/plugins/inputs/redis.go +++ b/telegraf/plugins/inputs/redis.go @@ -27,7 +27,7 @@ func (r *Redis) TOML() string { } password := ` # password = ""` if r.Password != "" { - password = fmt.Sprintf(` password = "%s"`, r.Password) + password = fmt.Sprintf(` # password = "%s"`, r.Password) } return fmt.Sprintf(`[[inputs.%s]] ## specify servers via a url matching: @@ -41,8 +41,25 @@ func (r *Redis) TOML() string { ## If no port is specified, 6379 is used servers = [%s] + ## Optional. Specify redis commands to retrieve values + # [[inputs.redis.commands]] + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" + ## specify server password %s + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true `, r.PluginName(), strings.Join(s, ", "), password) } diff --git a/telegraf/plugins/inputs/swap.go b/telegraf/plugins/inputs/swap.go index 244caff77e..2e704188b2 100644 --- a/telegraf/plugins/inputs/swap.go +++ b/telegraf/plugins/inputs/swap.go @@ -17,6 +17,7 @@ func (s *SwapStats) PluginName() string { // TOML encodes to toml string. 
func (s *SwapStats) TOML() string {
 return fmt.Sprintf(`[[inputs.%s]]
+ # no configuration
 `, s.PluginName())
 }
diff --git a/telegraf/plugins/inputs/syslog.go b/telegraf/plugins/inputs/syslog.go
index d9a52a9c0c..4b4e4c5fd0 100644
--- a/telegraf/plugins/inputs/syslog.go
+++ b/telegraf/plugins/inputs/syslog.go
@@ -24,6 +24,47 @@ func (s *Syslog) TOML() string {
 ## If no host is specified, then localhost is used.
 ## If no port is specified, 6514 is used (RFC5425#section-4.1).
 server = "%s"
+
+ ## TLS Config
+ # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+
+ ## Period between keep alive probes.
+ ## 0 disables keep alive probes.
+ ## Defaults to the OS configuration.
+ ## Only applies to stream sockets (e.g. TCP).
+ # keep_alive_period = "5m"
+
+ ## Maximum number of concurrent connections (default = 0).
+ ## 0 means unlimited.
+ ## Only applies to stream sockets (e.g. TCP).
+ # max_connections = 1024
+
+ ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+ ## 0 means unlimited.
+ # read_timeout = "5s"
+
+ ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+ ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+ ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+ ## Must be one of "octet-counting", "non-transparent".
+ # framing = "octet-counting"
+
+ ## The trailer to be expected in case of non-transparent framing (default = "LF").
+ ## Must be one of "LF", or "NUL".
+ # trailer = "LF"
+
+ ## Whether to parse in best effort mode or not (default = false).
+ ## By default best effort parsing is off.
+ # best_effort = false
+
+ ## Character to prepend to SD-PARAMs (default = "_").
+ ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+ ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+ ## For each combination a field is created.
+ ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+ # sdparam_separator = "_"
 `, s.PluginName(), s.Address)
 }
diff --git a/telegraf/plugins/inputs/system.go b/telegraf/plugins/inputs/system.go
index 84742382be..cfecc8aacc 100644
--- a/telegraf/plugins/inputs/system.go
+++ b/telegraf/plugins/inputs/system.go
@@ -17,6 +17,8 @@ func (s *SystemStats) PluginName() string {
 // TOML encodes to toml string
 func (s *SystemStats) TOML() string {
 return fmt.Sprintf(`[[inputs.%s]]
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["uptime_format"]
 `, s.PluginName())
 }
diff --git a/telegraf/plugins/inputs/tail.go b/telegraf/plugins/inputs/tail.go
index 3f5f7a0854..b91f1bb28c 100644
--- a/telegraf/plugins/inputs/tail.go
+++ b/telegraf/plugins/inputs/tail.go
@@ -24,29 +24,71 @@ func (t *Tail) TOML() string {
 for k, v := range t.Files {
 s[k] = strconv.Quote(v)
 }
- return fmt.Sprintf(`[[inputs.%s]]
- ## files to tail.
+ return fmt.Sprintf(`[[inputs.%s]]
+ ## File names or a pattern to tail.
 ## These accept standard unix glob matching rules, but with the addition of
 ## ** as a "super asterisk". ie:
 ## "/var/log/**.log" -> recursively find all .log files in /var/log
 ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
 ## "/var/log/apache.log" -> just tail the apache log file
- ##
+ ## "/var/log/log[!1-2]*" -> tail files without 1-2
+ ## "/var/log/log[^1-2]*" -> identical behavior as above
 ## See https://github.com/gobwas/glob for more examples
 ##
 files = [%s]
 
 ## Read file from beginning.
- from_beginning = false
+ # from_beginning = false
+
 ## Whether file is a named pipe
- pipe = false
+ # pipe = false
+
 ## Method used to watch for file updates. Can be either "inotify" or "poll".
 # watch_method = "inotify"
+
+ ## Maximum lines of the file to process that have not yet been written by the
+ ## output. For best throughput set based on the number of metrics on each
+ ## line and the size of the output's metric_batch_size.
+ # max_undelivered_lines = 1000
+
+ ## Character encoding to use when interpreting the file contents. Invalid
+ ## characters are replaced using the unicode replacement character. When set
+ ## to the empty string the data is not decoded to text.
+ ## ex: character_encoding = "utf-8"
+ ## character_encoding = "utf-16le"
+ ## character_encoding = "utf-16be"
+ ## character_encoding = ""
+ # character_encoding = ""
+
 ## Data format to consume.
 ## Each data format has its own unique set of configuration options, read
 ## more about them here:
 ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 data_format = "influx"
+
+ ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+ # path_tag = "path"
+
+ ## multiline parser/codec
+ ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+ #[inputs.tail.multiline]
+ ## The pattern should be a regexp which matches what you believe to be an
+ ## indicator that the field is part of an event consisting of multiple lines of log data.
+ #pattern = "^\s"
+
+ ## This field must be either "previous" or "next".
+ ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+ ## whereas "next" indicates that the line belongs to the next one.
+ #match_which_line = "previous"
+
+ ## The invert_match field can be true or false (defaults to false).
+ ## If true, a message not matching the pattern will constitute a match of the multiline
+ ## filter and the what will be applied. (vice-versa is also true)
+ #invert_match = false
+
+ ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+ ## is found to start a new event. The default timeout is 5s.
+ #timeout = "5s"
 `, t.PluginName(), strings.Join(s, ", "))
 }
diff --git a/telegraf/plugins/outputs/file.go b/telegraf/plugins/outputs/file.go
index 5c63a020b3..426323ae71 100644
--- a/telegraf/plugins/outputs/file.go
+++ b/telegraf/plugins/outputs/file.go
@@ -13,7 +13,7 @@ type File struct {
 Files []FileConfig `json:"files"`
 }
 
-// FileConfig is the config settings of output file plugin.
+// FileConfig is the config settings of output file plugin.
 type FileConfig struct {
 Typ string `json:"type"`
 Path string `json:"path"`
@@ -37,6 +37,29 @@ func (f *File) TOML() string {
 return fmt.Sprintf(`[[outputs.%s]]
 ## Files to write to, "stdout" is a specially handled file.
 files = [%s]
+
+ ## Use batch serialization format instead of line based delimiting. The
+ ## batch format allows for the production of non line based output formats and
+ ## may more efficiently encode metric groups.
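
Aside on the `files = [%s]` / `urls = [%s]` interpolation that tail.go above and file.go share: each user-supplied string is quoted with `strconv.Quote` and the results are joined with ", ", which is exactly the shape the updated tests below assert. A self-contained sketch of that pattern (variable names here are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	files := []string{"stdout", "/tmp/out.txt"}
	quoted := make([]string, len(files))
	for i, f := range files {
		// strconv.Quote adds the surrounding double quotes, escaping as needed.
		quoted[i] = strconv.Quote(f)
	}
	fmt.Printf("files = [%s]\n", strings.Join(quoted, ", "))
	// Output: files = ["stdout", "/tmp/out.txt"]
}
```
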
+ # use_batch_format = false + + ## The file will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" `, f.PluginName(), strings.Join(s, ", ")) } diff --git a/telegraf/plugins/outputs/influxdb_v2.go b/telegraf/plugins/outputs/influxdb_v2.go index 3f6171eca4..c4985a8cc1 100644 --- a/telegraf/plugins/outputs/influxdb_v2.go +++ b/telegraf/plugins/outputs/influxdb_v2.go @@ -27,12 +27,12 @@ func (i *InfluxDBV2) TOML() string { for k, v := range i.URLs { s[k] = strconv.Quote(v) } - return fmt.Sprintf(`[[outputs.%s]] + return fmt.Sprintf(`[[outputs.%s]] ## The URLs of the InfluxDB cluster nodes. ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] urls = [%s] ## Token for authentication. @@ -43,6 +43,40 @@ func (i *InfluxDBV2) TOML() string { ## Destination bucket to write into. bucket = "%s" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false `, i.PluginName(), strings.Join(s, ", "), i.Token, i.Organization, i.Bucket) } diff --git a/telegraf/plugins/outputs/outputs_test.go b/telegraf/plugins/outputs/outputs_test.go index cc70e0562a..92a44d5795 100644 --- a/telegraf/plugins/outputs/outputs_test.go +++ b/telegraf/plugins/outputs/outputs_test.go @@ -34,13 +34,36 @@ func TestTOML(t *testing.T) { &File{}: `[[outputs.file]] ## Files to write to, "stdout" is a specially handled file. files = [] + + ## Use batch serialization format instead of line based delimiting. The + ## batch format allows for the production of non line based output formats and + ## may more efficiently encode metric groups. + # use_batch_format = false + + ## The file will be rotated after the time interval specified. 
When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" `, - &InfluxDBV2{}: `[[outputs.influxdb_v2]] + &InfluxDBV2{}: `[[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] urls = [] ## Token for authentication. @@ -51,6 +74,40 @@ func TestTOML(t *testing.T) { ## Destination bucket to write into. bucket = "" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false `, }, }, @@ -65,22 +122,45 @@ func TestTOML(t *testing.T) { }: `[[outputs.file]] ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/out.txt"] + + ## Use batch serialization format instead of line based delimiting. The + ## batch format allows for the production of non line based output formats and + ## may more efficiently encode metric groups. + # use_batch_format = false + + ## The file will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 + + ## Data format to output. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" `, &InfluxDBV2{ URLs: []string{ - "http://192.168.1.10:8086", - "http://192.168.1.11:8086", + "http://192.168.1.10:9999", + "http://192.168.1.11:9999", }, Token: "tok1", Organization: "org1", Bucket: "bucket1", - }: `[[outputs.influxdb_v2]] + }: `[[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://192.168.1.10:8086", "http://192.168.1.11:8086"] + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + urls = ["http://192.168.1.10:9999", "http://192.168.1.11:9999"] ## Token for authentication. token = "tok1" @@ -90,6 +170,40 @@ func TestTOML(t *testing.T) { ## Destination bucket to write into. bucket = "bucket1" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. 
+ # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false `, }, }, @@ -161,16 +275,16 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2 missing token", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, }, wantErr: errors.New("token is missing for influxdb_v2 output plugin"), output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, }, }, @@ -178,8 +292,8 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2 missing org", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, Token: "token1", }, @@ -187,8 +301,8 @@ func TestDecodeTOML(t *testing.T) { output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, "token": "token1", }, @@ -197,8 +311,8 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2 missing bucket", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, Token: "token1", Organization: "org1", @@ -207,8 +321,8 @@ func TestDecodeTOML(t *testing.T) { output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, "token": "token1", "organization": "org1", @@ -218,8 +332,8 @@ func TestDecodeTOML(t *testing.T) { name: "influxdb_v2", want: &InfluxDBV2{ URLs: []string{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, Token: "token1", Organization: "org1", @@ -228,8 +342,8 @@ func TestDecodeTOML(t *testing.T) { output: &InfluxDBV2{}, data: map[string]interface{}{ "urls": []interface{}{ - "http://localhost:8086", - "http://192.168.0.1:8086", + "http://localhost:9999", + "http://192.168.0.1:9999", }, "token": "token1", "organization": "org1", diff --git a/telegraf/plugins/plugins.go b/telegraf/plugins/plugins.go index cde0021156..672190222a 100644 --- a/telegraf/plugins/plugins.go +++ b/telegraf/plugins/plugins.go @@ -197,10 +197,9 @@ var AgentConfig = `# Configuration for telegraf agent ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -226,13 +225,36 @@ var AgentConfig = `# Configuration for telegraf agent ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Logging configuration: - ## Run telegraf with debug log messages. 
- debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -908,7 +930,7 @@ var availableInputs = `{ "type": "input", "name": "httpjson", "description": "Read flattened metrics from one or more JSON HTTP endpoints", - "config": "# Read flattened metrics from one or more JSON HTTP endpoints\n[[inputs.httpjson]]\n # alias=\"httpjson\"\n ## NOTE This plugin only reads numerical measurements, strings and booleans\n ## will be ignored.\n\n ## Name for the service being polled. Will be appended to the name of the\n ## measurement e.g. httpjson_webserver_stats\n ##\n ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.\n name = \"webserver_stats\"\n\n ## URL of each server in the service's cluster\n servers = [\n \"http://localhost:8086/stats/\",\n \"http://localhost:9998/stats/\",\n ]\n ## Set response_timeout (default 5 seconds)\n response_timeout = \"5s\"\n\n ## HTTP method to use: GET or POST (case-sensitive)\n method = \"GET\"\n\n ## List of tag names to extract from top-level of JSON server response\n # tag_keys = [\n # \"my_tag_1\",\n # \"my_tag_2\"\n # ]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP parameters (all values must be strings). For \"GET\" requests, data\n ## will be included in the query. For \"POST\" requests, data will be included\n ## in the request body as \"x-www-form-urlencoded\".\n # [inputs.httpjson.parameters]\n # event_type = \"cpu_spike\"\n # threshold = \"0.75\"\n\n ## HTTP Headers (all values must be strings)\n # [inputs.httpjson.headers]\n # X-Auth-Token = \"my-xauth-token\"\n # apiVersion = \"v1\"\n\n" + "config": "# Read flattened metrics from one or more JSON HTTP endpoints\n[[inputs.httpjson]]\n # alias=\"httpjson\"\n ## NOTE This plugin only reads numerical measurements, strings and booleans\n ## will be ignored.\n\n ## Name for the service being polled. Will be appended to the name of the\n ## measurement e.g. 
httpjson_webserver_stats\n ##\n ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.\n name = \"webserver_stats\"\n\n ## URL of each server in the service's cluster\n servers = [\n \"http://localhost:9999/stats/\",\n \"http://localhost:9998/stats/\",\n ]\n ## Set response_timeout (default 5 seconds)\n response_timeout = \"5s\"\n\n ## HTTP method to use: GET or POST (case-sensitive)\n method = \"GET\"\n\n ## List of tag names to extract from top-level of JSON server response\n # tag_keys = [\n # \"my_tag_1\",\n # \"my_tag_2\"\n # ]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP parameters (all values must be strings). For \"GET\" requests, data\n ## will be included in the query. For \"POST\" requests, data will be included\n ## in the request body as \"x-www-form-urlencoded\".\n # [inputs.httpjson.parameters]\n # event_type = \"cpu_spike\"\n # threshold = \"0.75\"\n\n ## HTTP Headers (all values must be strings)\n # [inputs.httpjson.headers]\n # X-Auth-Token = \"my-xauth-token\"\n # apiVersion = \"v1\"\n\n" }, { "type": "input", @@ -1040,7 +1062,7 @@ var availableInputs = `{ "type": "input", "name": "jolokia2_proxy", "description": "Read JMX metrics from a Jolokia REST proxy endpoint", - "config": "# Read JMX metrics from a Jolokia REST proxy endpoint\n[[inputs.jolokia2_proxy]]\n # alias=\"jolokia2_proxy\"\n # default_tag_prefix = \"\"\n # default_field_prefix = \"\"\n # default_field_separator = \".\"\n\n ## Proxy agent\n url = \"http://localhost:8080/jolokia\"\n # username = \"\"\n # password = \"\"\n # response_timeout = \"5s\"\n\n ## Optional TLS config\n # tls_ca = \"/var/private/ca.pem\"\n # tls_cert = \"/var/private/client.pem\"\n # tls_key = \"/var/private/client-key.pem\"\n # insecure_skip_verify = false\n\n ## Add proxy targets to query\n # default_target_username = \"\"\n # default_target_password = \"\"\n [[inputs.jolokia2_proxy.target]]\n url = \"service:jmx:rmi:///jndi/rmi://targethost:8086/jmxrmi\"\n # username = \"\"\n # password = \"\"\n\n ## Add metrics to read\n [[inputs.jolokia2_proxy.metric]]\n name = \"java_runtime\"\n mbean = \"java.lang:type=Runtime\"\n paths = [\"Uptime\"]\n\n" + "config": "# Read JMX metrics from a Jolokia REST proxy endpoint\n[[inputs.jolokia2_proxy]]\n # alias=\"jolokia2_proxy\"\n # default_tag_prefix = \"\"\n # default_field_prefix = \"\"\n # default_field_separator = \".\"\n\n ## Proxy agent\n url = \"http://localhost:8080/jolokia\"\n # username = \"\"\n # password = \"\"\n # response_timeout = \"5s\"\n\n ## Optional TLS config\n # tls_ca = \"/var/private/ca.pem\"\n # tls_cert = \"/var/private/client.pem\"\n # tls_key = \"/var/private/client-key.pem\"\n # insecure_skip_verify = false\n\n ## Add proxy targets to query\n # default_target_username = \"\"\n # default_target_password = \"\"\n [[inputs.jolokia2_proxy.target]]\n url = \"service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi\"\n # username = \"\"\n # password = \"\"\n\n ## Add metrics to read\n [[inputs.jolokia2_proxy.metric]]\n name = \"java_runtime\"\n mbean = \"java.lang:type=Runtime\"\n paths = [\"Uptime\"]\n\n" }, { "type": "input", @@ -1437,7 +1459,7 @@ var availableOutputs = `{ "type": "output", "name": "influxdb_v2", "description": "Configuration for sending metrics to InfluxDB", - "config": "# Configuration for sending metrics to 
InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" + "config": "# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n"
},
{
"type": "output",
diff --git a/telegraf_test.go b/telegraf_test.go
index 0b7cc5d129..47729ef978 100644
--- a/telegraf_test.go
+++ b/telegraf_test.go
@@ -34,7 +34,7 @@ var telegrafCmpOptions = cmp.Options{
 }),
 }
 
-// tests backwards compatibility with the current plugin aware system.
+// tests backwards compatibility with the current plugin aware system.
 func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) {
 s := `{
 "name": "config 2",
@@ -65,7 +65,7 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) {
 "comment": "3",
 "config": {
 "urls": [
- "http://127.0.0.1:8086"
+ "http://127.0.0.1:9999"
 ],
 "token": "token1",
 "organization": "org",
@@ -89,10 +89,9 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) {
 ## This controls the size of writes that Telegraf sends to output plugins.
 metric_batch_size = 1000
 
- ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
- ## output, and will flush this buffer on a successful write. Oldest metrics
- ## are dropped first when this buffer fills.
- ## This buffer only fills when writes fail to output plugin(s).
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
 metric_buffer_limit = 10000
 
 ## Collection jitter is used to jitter the collection by a random amount.
@@ -118,13 +117,36 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) {
 ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
 precision = ""
 
- ## Logging configuration:
- ## Run telegraf with debug log messages.
- debug = false
- ## Run telegraf in quiet mode (error log messages only).
- quiet = false
- ## Specify the log file name. The empty string means to log to stderr.
- logfile = ""
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. 
Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -135,22 +157,44 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) { percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## If true, collect raw CPU time metrics. + ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false [[inputs.kernel]] + # no configuration [[inputs.kubernetes]] ## URL for the kubelet - ## exp: http://1.1.1.1:10255 - url = "http://1.1.1.1:12" -[[outputs.influxdb_v2]] + url = "http://1.1.1.1:12" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. + # label_include = [] + # label_exclude = ["*"] + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +[[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + urls = ["http://127.0.0.1:9999"] ## Token for authentication. token = "token1" @@ -160,6 +204,40 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) { ## Destination bucket to write into. bucket = "bucket" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. 
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
 `,
 Metadata: map[string]interface{}{"buckets": []string{"bucket"}},
 }
@@ -173,11 +251,11 @@ func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) {
 }
 }
 
-// tests forwards compatibility with the new plugin unaware system.
+// tests forwards compatibility with the new plugin unaware system.
 func TestTelegrafConfigJSONDecodeTOML(t *testing.T) {
 s := `{
 "name": "config 2",
- "config": "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n[[inputs.kernel]]\n[[inputs.kubernetes]]\n ## URL for the kubelet\n ## exp: http://1.1.1.1:10255\n url = \"http://1.1.1.1:12\"\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"token1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"org\"\n\n ## Destination bucket to write into.\n bucket = \"bucket\"\n" + "config": "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. 
If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states\n report_active = false\n[[inputs.kernel]]\n # no configuration\n[[inputs.kubernetes]]\n ## URL for the kubelet\n url = \"http://1.1.1.1:12\"\n\n ## Use bearer token for authorization. ('bearer_token' takes priority)\n ## If both of these are empty, we'll use the default serviceaccount:\n ## at: /run/secrets/kubernetes.io/serviceaccount/token\n # bearer_token = \"/path/to/bearer/token\"\n ## OR\n # bearer_token_string = \"abc_123\"\n\n ## Pod labels to be added as tags. An empty array for both include and\n ## exclude will include all labels.\n # label_include = []\n # label_exclude = [\"*\"]\n\n ## Set response_timeout (default 5 seconds)\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = /path/to/cafile\n # tls_cert = /path/to/certfile\n # tls_key = /path/to/keyfile\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"token1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"org1\"\n\n ## Destination bucket to write into.\n bucket = \"bucket1\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n" }` want := &TelegrafConfig{ @@ -195,10 +273,9 @@ func TestTelegrafConfigJSONDecodeTOML(t *testing.T) { ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -224,13 +301,36 @@ func TestTelegrafConfigJSONDecodeTOML(t *testing.T) { ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. 
+ ## Example: America/Chicago + # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -241,33 +341,89 @@ func TestTelegrafConfigJSONDecodeTOML(t *testing.T) { percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## If true, collect raw CPU time metrics. + ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false [[inputs.kernel]] + # no configuration [[inputs.kubernetes]] ## URL for the kubelet - ## exp: http://1.1.1.1:10255 url = "http://1.1.1.1:12" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. + # label_include = [] + # label_exclude = ["*"] + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false [[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. ## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + urls = ["url1", "url2"] ## Token for authentication. token = "token1" ## Organization is the name of the organization you wish to write to; must exist. - organization = "org" + organization = "org1" ## Destination bucket to write into. - bucket = "bucket" + bucket = "bucket1" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. 
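
For orientation, the decode path this test drives: `json.Unmarshal` on a `TelegrafConfig` accepts a raw TOML string in the `config` field, re-renders the recognized plugins into the canonical sample text shown in `want`, and collects bucket names into `Metadata`. A hedged sketch, assuming `TelegrafConfig` is importable from the root package at `github.com/influxdata/influxdb/v2` (an assumption; the patch only shows in-package tests):

```go
// Illustrative sketch only; mirrors how the tests above drive the decoder.
package main

import (
	"encoding/json"
	"fmt"

	influxdb "github.com/influxdata/influxdb/v2" // assumed module path
)

func main() {
	// Raw TOML under "config" is the plugin-unaware, forward-compatible shape.
	payload := []byte(`{"name": "config 2", "config": "[[outputs.influxdb_v2]]\n  urls = [\"url1\"]\n  token = \"token1\"\n  organization = \"org1\"\n  bucket = \"bucket1\"\n"}`)

	tc := new(influxdb.TelegrafConfig)
	if err := json.Unmarshal(payload, tc); err != nil {
		panic(err)
	}
	fmt.Println(tc.Name)                // "config 2"
	fmt.Println(tc.Metadata["buckets"]) // expected ["bucket1"], per the tests above
}
```
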
+ # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false `, - Metadata: map[string]interface{}{"buckets": []string{"bucket"}}, + Metadata: map[string]interface{}{"buckets": []string{"bucket1"}}, } got := new(TelegrafConfig) err := json.Unmarshal([]byte(s), got) @@ -296,7 +452,7 @@ func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { cfg: &TelegrafConfig{ ID: *id1, OrgID: *id2, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", }, expMeta: map[string]interface{}{"buckets": []string{}}, }, @@ -306,7 +462,7 @@ func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { cfg: &TelegrafConfig{ ID: *id1, OrgID: *id2, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", }, expMeta: map[string]interface{}{"buckets": []string{}}, }, @@ -316,7 +472,7 @@ func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { cfg: &TelegrafConfig{ ID: *id1, OrgID: *id3, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. 
The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:8086\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", }, expMeta: map[string]interface{}{"buckets": []string{}}, }, @@ -393,12 +549,12 @@ func TestTelegrafConfigJSON(t *testing.T) { } } ] - }`, *id1, *id2), + }`, id1, id2), expect: &TelegrafConfig{ ID: *id1, OrgID: *id2, Name: "n1", - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.file]]\t\n ## Files to parse each interval.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## /var/log/**.log -> recursively find all .log files in /var/log\n ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log\n ## /var/log/apache.log -> only read the apache log file\n files = [\"f1\", \"f2\"]\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n[[outputs.file]]\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\"]\n[[outputs.influxdb_v2]]\t\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"tok1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n", + Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.file]]\n ## Files to parse each interval. Accept standard unix glob matching rules,\n ## as well as ** to match recursive files and directories.\n files = [\"f1\", \"f2\"]\n\n ## Name a tag containing the name of the file the data was parsed from. Leave empty\n ## to disable.\n # file_tag = \"\"\n\n ## Character encoding to use when interpreting the file contents. Invalid\n ## characters are replaced using the unicode replacement character. 
When set\n ## to the empty string the data is not decoded to text.\n ## ex: character_encoding = \"utf-8\"\n ## character_encoding = \"utf-16le\"\n ## character_encoding = \"utf-16be\"\n ## character_encoding = \"\"\n # character_encoding = \"\"\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states\n report_active = false\n[[outputs.file]]\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\"]\n\n ## Use batch serialization format instead of line based delimiting. The\n ## batch format allows for the production of non line based output formats and\n ## may more efficiently encode metric groups.\n # use_batch_format = false\n\n ## The file will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed.\n # rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # rotation_max_archives = 5\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"tok1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n", Metadata: map[string]interface{}{"buckets": []string{}}, }, }, @@ -418,7 +574,7 @@ func TestTelegrafConfigJSON(t *testing.T) { } } ] - }`, *id1, *id2), + }`, id1, id2), err: &errors.Error{ Code: errors.EInvalid, Msg: fmt.Sprintf(ErrUnsupportTelegrafPluginType, "aggregator"), @@ -440,7 +596,7 @@ func TestTelegrafConfigJSON(t *testing.T) { } } ] - }`, *id1, *id2), + }`, id1, id2), err: &errors.Error{ Code: errors.EInvalid, Msg: fmt.Sprintf(ErrUnsupportTelegrafPluginName, "kafka", plugins.Output), @@ -507,7 +663,7 @@ func TestLegacyStruct(t *testing.T) { } } ] - }`, *id1) + }`, id1) want := `# Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -521,10 +677,9 @@ func TestLegacyStruct(t *testing.T) { ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -550,27 +705,59 @@ func TestLegacyStruct(t *testing.T) { ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. 
Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false -[[inputs.file]] - ## Files to parse each interval. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only read the apache log file +[[inputs.file]] + ## Files to parse each interval. Accept standard unix glob matching rules, + ## as well as ** to match recursive files and directories. files = ["f1", "f2"] + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. + # file_tag = "" + + ## Character encoding to use when interpreting the file contents. Invalid + ## characters are replaced using the unicode replacement character. When set + ## to the empty string the data is not decoded to text. + ## ex: character_encoding = "utf-8" + ## character_encoding = "utf-16le" + ## character_encoding = "utf-16be" + ## character_encoding = "" + # character_encoding = "" + ## The dataformat to be read from files ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -581,19 +768,42 @@ func TestLegacyStruct(t *testing.T) { percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## If true, collect raw CPU time metrics. + ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false [[outputs.file]] ## Files to write to, "stdout" is a specially handled file. files = ["stdout"] -[[outputs.influxdb_v2]] + + ## Use batch serialization format instead of line based delimiting. The + ## batch format allows for the production of non line based output formats and + ## may more efficiently encode metric groups. + # use_batch_format = false + + ## The file will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +[[outputs.influxdb_v2]] ## The URLs of the InfluxDB cluster nodes. 
## ## Multiple URLs can be specified for a single cluster, only ONE of the ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] urls = ["url1", "url2"] ## Token for authentication. @@ -604,6 +814,40 @@ func TestLegacyStruct(t *testing.T) { ## Destination bucket to write into. bucket = "bucket1" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` tc := &TelegrafConfig{} require.NoError(t, json.Unmarshal([]byte(telConfOld), tc))
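
The test cases above all exercise the same round trip: a TelegrafConfig is decoded from JSON, the decoder regenerates the agent section plus each plugin's TOML, and the result is compared against a want fixture mirroring Telegraf's current sample configs. That coupling is why every embedded sample-config string has to be updated in lockstep with the plugin templates. It is also why the *id1, *id2 arguments can become id1, id2: a *platform.ID keeps the value receiver's methods in its method set, so the rendered IDs come out the same provided the format verbs go through the ID's Stringer. Below is a minimal sketch of the round-trip shape, assuming the same package and imports as this test file; jsonFixture and wantTOML are hypothetical placeholders, not literals from this diff.

// Sketch only: the shape shared by TestTelegrafConfigJSONDecodeTOML,
// TestTelegrafConfigJSONCompatibleMode, and TestLegacyStruct.
func TestTelegrafConfigRoundTripSketch(t *testing.T) {
	const jsonFixture = `{}` // hypothetical stand-in for the long JSON literals above
	const wantTOML = ``      // hypothetical stand-in for the want strings above

	got := new(TelegrafConfig)
	// Decoding populates got.Config with the generated TOML
	// (agent section followed by each input/output block).
	require.NoError(t, json.Unmarshal([]byte(jsonFixture), got))

	// Comparing the whole rendered string is noisy to update but catches any
	// drift between the telegraf/plugins templates and these fixtures.
	if got.Config != wantTOML {
		t.Fatalf("unexpected TOML:\ngot:  %q\nwant: %q", got.Config, wantTOML)
	}
}

The whole-string comparison is a deliberate trade-off: each upstream sample-config change forces a bulk fixture update like this one, but any accidental drift in a single plugin's TOML template surfaces immediately.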