chore: remove deprecated tool influx_stress (#20892)

https://github.com/influxdata/influxdb/issues/20891
pull/20903/head
Sam Arnold 2021-03-09 10:29:24 -05:00 committed by GitHub
parent bf7dddaec5
commit 6b8ec8cfe0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
72 changed files with 1 additions and 7262 deletions

4
.gitignore vendored
View File

@ -20,10 +20,6 @@ influx_tsm
**/influx_tsm
!**/influx_tsm/
influx_stress
**/influx_stress
!**/influx_stress/
influxd
**/influxd
!**/influxd/

View File

@ -88,7 +88,6 @@ for f in CONFIGURATION_FILES:
targets = {
'influx' : './cmd/influx',
'influxd' : './cmd/influxd',
'influx_stress' : './cmd/influx_stress',
'influx_inspect' : './cmd/influx_inspect',
'influx_tsm' : './cmd/influx_tsm',
}

View File

@ -1,43 +0,0 @@
# `influx_stress`
If you run into any issues with this tool please mention @jackzampolin when you create an issue.
## Ways to run
### `influx_stress`
This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml). For more information on the configuration file please see the default.
### `influx_stress -config someConfig.toml`
This runs the stress test with a valid configuration file located at `someConfig.toml`
### `influx_stress -v2 -config someConfig.iql`
This runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md).
## Flags
If flags are defined they overwrite the config from any file passed in.
### `-addr` string
IP address and port of database where response times will persist (e.g., localhost:8086)
`default` = "http://localhost:8086"
### `-config` string
The relative path to the stress test configuration file.
`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml)
### `-cpuprofile` filename
Writes the result of Go's cpu profile to filename
`default` = no profiling
### `-database` string
Name of database on `-addr` that `influx_stress` will persist write and query response times
`default` = "stress"
### `-tags` value
A comma separated list of tags to add to write and query response times.
`default` = ""

View File

@ -1,92 +0,0 @@
# This section can be removed
[provision]
# The basic provisioner simply deletes and creates database.
# If `reset_database` is false, it will not attempt to delete the database
[provision.basic]
# If enabled the provisioner will actually run
enabled = true
# Address of the instance that is to be provisioned
address = "localhost:8086"
# Database that will be created/deleted
database = "stress"
# Attempt to delete database
reset_database = true
# This section cannot be commented out
# To prevent writes set `enabled=false`
# in [write.influx_client.basic]
[write]
[write.point_generator]
# The basic point generator will generate points of the form
# `cpu,host=server-%v,location=us-west value=234 123456`
[write.point_generator.basic]
# number of points that will be written for each of the series
point_count = 100
# number of series
series_count = 100000
# How much time between each timestamp
tick = "10s"
# Randomize timestamp a bit (not functional)
jitter = true
# Precision of points that are being written
precision = "s"
# name of the measurement that will be written
measurement = "cpu"
# The date for the first point that is written into influx
start_date = "2006-Jan-02"
# Defines a tag for a series
[[write.point_generator.basic.tag]]
key = "host"
value = "server"
[[write.point_generator.basic.tag]]
key = "location"
value = "us-west"
# Defines a field for a series
[[write.point_generator.basic.field]]
key = "value"
value = "float64" # supported types: float64, int, bool
[write.influx_client]
[write.influx_client.basic]
# If enabled the writer will actually write
enabled = true
# Addresses is an array of the Influxdb instances
addresses = ["localhost:8086"] # stress_test_server runs on port 1234
# Database that is being written to
database = "stress"
# Precision of points that are being written
precision = "s"
# Size of batches that are sent to db
batch_size = 10000
# Interval between each batch
batch_interval = "0s"
# How many concurrent writers to the db
concurrency = 10
# ssl enabled?
ssl = false
# format of points that are written to influxdb
format = "line_http" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet)
# This section can be removed
[read]
[read.query_generator]
[read.query_generator.basic]
# Template of the query that will be ran against the instance
template = "SELECT count(value) FROM cpu where host='server-%v'"
# How many times the templated query will be ran
query_count = 250
[read.query_client]
[read.query_client.basic]
# if enabled the reader will actually read
enabled = true
# Address of the instance that will be queried
addresses = ["localhost:8086"]
# Database that will be queried
database = "stress"
# Interval between queries
query_interval = "100ms"
# Number of concurrent queriers
concurrency = 1

View File

@ -1,71 +0,0 @@
// Command influx_stress is deprecated; use github.com/influxdata/influx-stress instead.
package main
import (
"flag"
"fmt"
"log"
"os"
"runtime/pprof"
"github.com/influxdata/influxdb/stress"
v2 "github.com/influxdata/influxdb/stress/v2"
)
// Command-line flags for the stress tool. NewOutputConfig (in the stress
// package) registers additional output flags and parses them together
// with these.
var (
	useV2      = flag.Bool("v2", false, "Use version 2 of stress tool")
	config     = flag.String("config", "", "The stress test file")
	cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`")
	db         = flag.String("db", "", "target database within test system for write and query load")
)
// main runs either the v1 (TOML-configured) or v2 (iql-configured) stress test.
func main() {
	// NOTE(review): NewOutputConfig registers its own flags (-addr,
	// -database, -retention-policy, -tags) AND calls flag.Parse itself,
	// so all flags are already parsed when it returns; the flag.Parse
	// below is a re-parse. The call order here is therefore significant.
	o := stress.NewOutputConfig()
	flag.Parse()
	// Optional CPU profiling of the stress tool itself (not the server).
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			fmt.Println(err)
			return
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	if *useV2 {
		// v2 path: run the iql-based stress test, falling back to the
		// bundled default script when no -config was given.
		if *config != "" {
			v2.RunStress(*config)
		} else {
			v2.RunStress("stress/v2/iql/file.iql")
		}
	} else {
		c, err := stress.NewConfig(*config)
		if err != nil {
			log.Fatal(err)
			return // unreachable: log.Fatal exits the process
		}
		// -db overrides the target database in all three config sections.
		if *db != "" {
			c.Provision.Basic.Database = *db
			c.Write.InfluxClients.Basic.Database = *db
			c.Read.QueryClients.Basic.Database = *db
		}
		w := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic)
		r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic)
		s := stress.NewStressTest(&c.Provision.Basic, w, r)
		// Broadcast write and read responses both to the console summary
		// handlers and to the HTTP output that persists response times.
		bw := stress.NewBroadcastChannel()
		bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler)
		bw.Register(o.HTTPHandler("write"))
		br := stress.NewBroadcastChannel()
		br.Register(c.Read.QueryClients.Basic.BasicReadHandler)
		br.Register(o.HTTPHandler("read"))
		s.Start(bw.Handle, br.Handle)
	}
}

View File

@ -11,7 +11,6 @@ MAN1_TXT += influxd-run.txt
MAN1_TXT += influxd-version.txt
MAN1_TXT += influx.txt
MAN1_TXT += influx_inspect.txt
MAN1_TXT += influx_stress.txt
MAN1_TXT += influx_tsm.txt
MAN_TXT = $(MAN1_TXT)

View File

@ -1,52 +0,0 @@
influx_stress(1)
================
NAME
----
influx_stress - Runs a stress test against one or multiple InfluxDB servers
SYNOPSIS
--------
[verse]
'influx_stress' [options]
DESCRIPTION
-----------
Runs write and query stress tests against one or multiple InfluxDB servers to
create reproducible performance benchmarks against InfluxDB.
OPTIONS
-------
-addr <addr>::
IP address and port of the database where response times will persist. This
is not for specifying which database to test against. That option is located
inside of the configuration file. The default is 'http://localhost:8086'.
-database <name>::
The database where response times will persist. This is not for specifying
which database to test against. See '-db' or the configuration file for that
option. The default is 'stress'.
-retention-policy <name>::
The retention policy where response times will persist. This is not for
specifying which retention policy to test against. See the configuration file
for that option. The default is an empty string which will use the default
retention policy.
-config <path>::
The stress configuration file.
-cpuprofile <path>::
Write the cpu profile to the path. No cpu profile is written unless this is
used. This profiles 'influx_stress', not the InfluxDB server.
-db <name>::
The target database within the test system for write and query load.
-tags <values>::
A comma separated list of tags.
-v2::
Use version 2 of the stress tool. The default is to use version 1.
include::footer.txt[]

View File

@ -16,7 +16,7 @@ Exactly one of -D or -R must be provided to indicate Debian or RPM packages.
"
}
BINS=( influx influxd influx_stress influx_inspect influx_tsm )
BINS=( influx influxd influx_inspect influx_tsm )
function testInstalled() {
if ! command -v "$1" >/dev/null 2>&1 ; then
@ -49,7 +49,6 @@ function testManpages() {
for p in influxd influxd-backup influxd-config influxd-restore influxd-run influxd-version \
influx \
influx_inspect \
influx_stress \
influx_tsm ; do
if ! grep -F "/usr/share/man/man1/$p.1.gz" < "$f" > /dev/null; then
>&2 echo "Package is missing man page for $p"

View File

@ -66,7 +66,6 @@ fi
OUTDIR=$(mktemp -d)
for cmd in \
influxdb/cmd/influxd \
influxdb/cmd/influx_stress \
influxdb/cmd/influx \
influxdb/cmd/influx_inspect \
influxdb/cmd/influx_tsm \

View File

@ -1,47 +0,0 @@
## Stress Test
The logic for `StressTest` can be found in `stress/run.go`.
A new `StressTest` type was added and is composed four different parts. The `StressTest` type has one method `Start(wHandle responseHandler, rHandle responseHandler)`. This method starts the stress test.
A `responseHandler` is a function with type signature `func(r <-chan response, t *Timer)`. Response Handlers handle the read and write responses respectively.
### Provisioner
Provisions the InfluxDB instance that the stress test is going to be run against.
Think things like, creating the database, setting up retention policies, continuous queries, etc.
### Writer
The `Writer` is responsible for Writing data into an InfluxDB instance. It has two components: `PointGenerator` and `InfluxClient`.
##### PointGenerator
The `PointGenerator` is responsible for generating points that will be written into InfluxDB. Additionally, it is responsible for keeping track of the latest timestamp of the points it is writing (just in case it is needed by the `Reader`).
Any type that implements the methods `Generate()` and `Time()` is a `PointGenerator`.
##### InfluxClient
The `InfluxClient` is responsible for writing the data that is generated by the `PointGenerator`.
Any type that implements `Batch(ps <-chan Point, r chan<- response)`, and `send(b []byte) response` is an `InfluxClient`.
### Reader
The `Reader` is responsible for querying the database. It has two components: `QueryGenerator` and `QueryClient`.
##### QueryGenerator
The `QueryGenerator` is responsible for generating queries.
##### QueryClient
The `QueryClient` is responsible for executing queries against an InfluxDB instance.
## Basic
`basic.go` implements each of the components of a stress test.
## Util
`util.go` contains utility methods used throughout the package.
## Config
`config.go` contains the logic for managing the configuration of the stress test.
A sample configuration file can be found in `stress/stress.toml`. This still needs work, but what's there now is good enough.
## Template
`template.go` contains the logic for a basic stress test.

View File

@ -1,115 +0,0 @@
# `influx_stress` usage and configuration
The binary for `influx_stress` comes bundled with all influx installations.
To run it against an `influxd` instance located at `localhost:8086` with the default configuration options:
See more about the [default configuration options](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml)
```bash
$ influx_stress
```
To run `influx_stress` with a configuration file:
```bash
$ influx_stress -config my_awesome_test.toml
```
To daemonize `influx_stress` and save the output to a results file:
```bash
$ influx_stress -config my_awesome_test.toml > my_awesome_test_out.txt 2>&1 &
```
To run multiple instances of `influx_stress` just change the `measurement` each test writes to, details below
```bash
$ influx_stress -config my_awesome_test1.toml > my_awesome_test_out1.txt 2>&1 &
$ influx_stress -config my_awesome_test2.toml > my_awesome_test_out2.txt 2>&1 &
```
Below is a sample configuration file with comments explaining the different options
```toml
# The [provision] section creates a new database on the target instance for the stress test to write points to and perform queries against
# This section can be deleted if the instance is manually configured. In that case make sure that the database referenced in [write] exists
# The provisioner will try to delete the database before trying to recreate it.
[provision]
[provision.basic]
# If set to false you can delete this section from the config
enabled = true
# address of the node to be provisioned
address = "<node1_ip>:8086"
# name of the database to create
database = "stress"
# This must be set to true
reset_database = true
# The [write] section defines the shape of the generated data and configures the InfluxDB client
[write]
# The [write.point_generator] defines the shape of the generated data
[write.point_generator]
[write.point_generator.basic]
# This needs to be set to true
enabled = true
# The total number of points a stress_test will write is determined by multiplying the following two numbers:
# point_count * series_count = total_points
# Number of points to write to the database for each series
point_count = 100
# Number of series to write to the database
series_count = 100000
# This simulates collection interval in the timestamps of generated points
tick = "10s"
# This must be set to true
jitter = true
# The measurement name for the generated points
measurement = "cpu"
# The generated timestamps follow the pattern of { start_date + (n * tick) }
# This sequence is preserved for each series and is always increasing
start_date = "2006-Jan-02"
# Precision for generated points
# This setting MUST be the same as [write.influx_client.basic]precision
precision = "s"
# The '[[]]' in toml format indicates that the element is an array of items.
# [[write.point_generator.basic.tag]] defines a tag on the generated points
# key is the tag key
# value is the tag value
# The first tag defined will have '-0' through '-{series_count}' added to the end of the string
[[write.point_generator.basic.tag]]
key = "host"
value = "server"
[[write.point_generator.basic.tag]]
key = "location"
value = "us-west"
# [[write.point_generator.basic.field]] defines a field on the generated points
# key is the field key
# value is the type of the field
[[write.point_generator.basic.field]]
key = "value"
# Can be either "float64", "int", "bool"
value = "float64"
# The [write.influx_client] defines what influx instances the stress_test targets
[write.influx_client]
[write.influx_client.basic]
# This must be set to true
enabled = true
# This is an array of addresses
# addresses = ["<node1_ip>:8086","<node2_ip>:8086","<node3_ip>:8086"] to target a cluster
addresses = ["<node1_ip>:8086"] # to target an individual node
# This database in the in the target influx instance to write to
# This database MUST be created in the target instance or the test will fail
database = "stress"
# Write precision for points
# This setting MUST be the same as [write.point_generator.basic]precision
precision = "s"
# The number of point to write to the database with each POST /write sent
batch_size = 5000
# An optional amount of time for a worker to wait between POST requests
batch_interval = "0s"
# The number of workers to use to write to the database
# More workers == more load with diminishing returns starting at ~5 workers
# 10 workers provides a medium-high level of load to the database
concurrency = 10
# This must be set to false
ssl = false
# This must be set to "line_http"
format = "line_http"
```

View File

@ -1,690 +0,0 @@
package stress
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
"time"
"github.com/influxdata/influxdb/client/v2"
)
// backoffInterval is the increment added to the write-retry backoff on
// each failed attempt (see BasicClient.retry).
const backoffInterval = time.Duration(500 * time.Millisecond)
// AbstractTag abstractly defines a single tag as a key/value pair.
type AbstractTag struct {
	Key   string `toml:"key"`
	Value string `toml:"value"`
}

// AbstractTags is a slice of abstract tags.
type AbstractTags []AbstractTag

// Template renders the tags as a comma-separated line-protocol tagset
// template. The first tag's value gets a "-%v" placeholder appended so the
// series index can be substituted in later.
func (t AbstractTags) Template() string {
	out := ""
	for i, tag := range t {
		if i > 0 {
			out += ","
		}
		if i == 0 {
			out += fmt.Sprintf("%v=%v-%%v", tag.Key, tag.Value)
		} else {
			out += fmt.Sprintf("%v=%v", tag.Key, tag.Value)
		}
	}
	return out
}
// AbstractField abstractly defines a single field: its key and the Go type
// of the values that will be generated for it.
type AbstractField struct {
	Key  string `toml:"key"`
	Type string `toml:"type"`
}

// AbstractFields is a slice of abstract fields.
type AbstractFields []AbstractField

// Template renders the fields as a comma-separated line-protocol fieldset
// template ("k1=%v,k2=%v") and returns, in parallel, the declared type of
// each field for later value generation.
func (f AbstractFields) Template() (string, []string) {
	types := make([]string, len(f))
	tmpl := ""
	for i, fld := range f {
		if i > 0 {
			tmpl += ","
		}
		tmpl += fmt.Sprintf("%v=%%v", fld.Key)
		types[i] = fld.Type
	}
	return tmpl, types
}
// BasicPointGenerator implements the PointGenerator interface. It generates
// PointCount timestamps per series for SeriesCount series, shaped by the
// tag/field templates, starting at StartDate and stepping by Tick.
type BasicPointGenerator struct {
	PointCount  int            `toml:"point_count"`  // points written per series
	Tick        string         `toml:"tick"`         // duration string between timestamps
	Jitter      bool           `toml:"jitter"`       // randomize timestamps (not functional per docs)
	Measurement string         `toml:"measurement"`  // measurement name for generated points
	SeriesCount int            `toml:"series_count"` // number of distinct series
	Tags        AbstractTags   `toml:"tag"`
	Fields      AbstractFields `toml:"field"`
	StartDate   string         `toml:"start_date"` // "2006-Jan-02" layout, or "now"
	Precision   string         `toml:"precision"`  // "s" for seconds, anything else nanoseconds
	// time is the timestamp of the most recently generated points,
	// guarded by mu (read by Time, written by Generate's goroutine).
	time time.Time
	mu   sync.Mutex
}
// typeArr maps a slice of type names to a slice of formatted sample values,
// one per name: "int" yields "<n>i", "bool" yields "true"/"false", and
// "float64" (or any unrecognized name) yields a bare number. Values are
// derived from the current clock's nanoseconds, so they vary per call.
func typeArr(a []string) []interface{} {
	vals := make([]interface{}, len(a))
	for idx, kind := range a {
		var v string
		switch kind {
		case "int":
			v = fmt.Sprintf("%vi", time.Now().Nanosecond()%1000)
		case "bool":
			v = fmt.Sprintf("%t", time.Now().Nanosecond()%2 == 1)
		default: // "float64" and anything unrecognized share one format
			v = fmt.Sprintf("%v", time.Now().Nanosecond()%1000)
		}
		vals[idx] = v
	}
	return vals
}
// timestamp converts t to the integer form matching the generator's
// precision: Unix seconds when Precision is "s", nanoseconds otherwise.
func (b *BasicPointGenerator) timestamp(t time.Time) int64 {
	if b.Precision == "s" {
		return t.Unix()
	}
	return t.UnixNano()
}
// Template builds the line-protocol format string once from the tag and
// field templates, then returns a factory that produces a *Pnt for a given
// series index i and timestamp t.
func (b *BasicPointGenerator) Template() func(i int, t time.Time) *Pnt {
	tagTmpl := b.Tags.Template()
	fieldTmpl, fieldTypes := b.Fields.Template()
	line := fmt.Sprintf("%v,%v %v %%v", b.Measurement, tagTmpl, fieldTmpl)
	return func(i int, t time.Time) *Pnt {
		args := make([]interface{}, 0, len(fieldTypes)+2)
		args = append(args, i)                    // series index fills the first tag's %v
		args = append(args, typeArr(fieldTypes)...) // one generated value per field
		args = append(args, b.timestamp(t))
		pnt := &Pnt{}
		pnt.Set([]byte(fmt.Sprintf(line, args...)))
		return pnt
	}
}
// Pnt is a struct that implements the Point interface.
type Pnt struct {
line []byte
}
// Set sets the internal state for a Pnt.
func (p *Pnt) Set(b []byte) {
p.line = b
}
// Next generates very simple points very
// efficiently.
// TODO: Take this out
func (p *Pnt) Next(i int, t time.Time) {
p.line = []byte(fmt.Sprintf("a,b=c-%v v=%v", i, i))
}
// Line returns a byte array for a point
// in line protocol format.
func (p Pnt) Line() []byte {
return p.line
}
// Graphite returns a byte array for a point
// in graphite format.
func (p Pnt) Graphite() []byte {
// TODO: Implement
return []byte("")
}
// OpenJSON returns a byte array for a point
// in opentsdb json format
func (p Pnt) OpenJSON() []byte {
// TODO: Implement
return []byte("")
}
// OpenTelnet returns a byte array for a point
// in opentsdb-telnet format
func (p Pnt) OpenTelnet() []byte {
// TODO: Implement
return []byte("")
}
// Generate returns a buffered channel of points and starts a goroutine
// that fills it: PointCount ticks, each emitting one point per series.
// The goroutine owns b.time (under b.mu) so Time() can report progress.
// Implements the Generate method of the PointGenerator interface.
func (b *BasicPointGenerator) Generate() (<-chan Point, error) {
	// TODO: should be 1.5x batch size
	c := make(chan Point, 15000)
	tmplt := b.Template()
	go func(c chan Point) {
		defer close(c)
		var start time.Time
		var err error
		if b.StartDate == "now" {
			start = time.Now()
		} else {
			start, err = time.Parse("2006-Jan-02", b.StartDate)
		}
		// A parse failure closes the channel early; the error is only
		// printed, not returned (Generate has already returned nil error).
		if err != nil {
			fmt.Println(err)
			return
		}
		b.mu.Lock()
		b.time = start
		b.mu.Unlock()
		tick, err := time.ParseDuration(b.Tick)
		if err != nil {
			fmt.Println(err)
			return
		}
		// Outer loop advances the shared timestamp once per tick; the
		// inner loop stamps every series with that same timestamp.
		for i := 0; i < b.PointCount; i++ {
			b.mu.Lock()
			b.time = b.time.Add(tick)
			b.mu.Unlock()
			for j := 0; j < b.SeriesCount; j++ {
				p := tmplt(j, b.time)
				c <- *p
			}
		}
	}(c)
	return c, nil
}
// Time returns the timestamp of the latest points being generated, read
// under the mutex shared with Generate's goroutine. Implements the Time
// method of the PointGenerator interface.
func (b *BasicPointGenerator) Time() time.Time {
	b.mu.Lock()
	current := b.time
	b.mu.Unlock()
	return current
}
// BasicClient implements the InfluxClient interface: it batches generated
// points and POSTs them to one or more InfluxDB /write endpoints.
type BasicClient struct {
	Enabled         bool     `toml:"enabled"`          // when false, Batch is a no-op
	Addresses       []string `toml:"addresses"`        // host:port targets; rewritten to full URLs by Batch
	Database        string   `toml:"database"`
	RetentionPolicy string   `toml:"retention-policy"`
	Precision       string   `toml:"precision"`
	BatchSize       int      `toml:"batch_size"`     // points per POST
	BatchInterval   string   `toml:"batch_interval"` // duration string slept between sends
	Concurrency     int      `toml:"concurrency"`    // max concurrent writers
	SSL             bool     `toml:"ssl"`
	Format          string   `toml:"format"`
	// addrId indexes Addresses for round-robin target selection.
	addrId int
	// r receives one response per send attempt (set by Batch).
	r chan<- response
	// interval is the parsed BatchInterval (set by Batch).
	interval time.Duration
}
// retry sends b, reports the attempt's response on c.r, and — if the send
// failed — sleeps for the grown backoff and recurses to try again.
// NOTE(review): the recursion is unbounded and the backoff grows by
// backoffInterval per attempt with no cap; a permanently unreachable
// server retries forever with an ever-deeper stack — verify acceptable.
func (c *BasicClient) retry(b []byte, backoff time.Duration) {
	bo := backoff + backoffInterval
	rs, err := c.send(b)
	time.Sleep(c.interval)
	// Every attempt (success or failure) is reported to the handlers.
	c.r <- rs
	if !rs.Success() || err != nil {
		time.Sleep(bo)
		c.retry(b, bo)
	}
}
// Batch groups incoming points into newline-joined payloads of BatchSize
// points each and hands every payload to a bounded pool of concurrent
// writers. It returns once ps is closed and all in-flight writes finish.
func (c *BasicClient) Batch(ps <-chan Point, r chan<- response) error {
	if !c.Enabled {
		return nil
	}
	// Expand the bare host:port addresses into full /write URLs once.
	// NOTE(review): this mutates c.Addresses in place, so calling Batch a
	// second time on the same client would double-wrap the URLs.
	instanceURLs := make([]string, len(c.Addresses))
	for i := 0; i < len(c.Addresses); i++ {
		instanceURLs[i] = fmt.Sprintf("http://%v/write?db=%v&rp=%v&precision=%v", c.Addresses[i], c.Database, c.RetentionPolicy, c.Precision)
	}
	c.Addresses = instanceURLs
	c.r = r
	var buf bytes.Buffer
	var wg sync.WaitGroup
	counter := NewConcurrencyLimiter(c.Concurrency)
	interval, err := time.ParseDuration(c.BatchInterval)
	if err != nil {
		return err
	}
	c.interval = interval
	ctr := 0
	// writeBatch dispatches one payload asynchronously, bounded by the
	// concurrency limiter and tracked by the wait group.
	writeBatch := func(b []byte) {
		wg.Add(1)
		counter.Increment()
		go func(byt []byte) {
			c.retry(byt, time.Duration(1))
			counter.Decrement()
			wg.Done()
		}(b)
	}
	for p := range ps {
		b := p.Line()
		// Round-robin the target address once per point.
		c.addrId = ctr % len(c.Addresses)
		ctr++
		buf.Write(b)
		buf.Write([]byte("\n"))
		if ctr%c.BatchSize == 0 && ctr != 0 {
			b := buf.Bytes()
			if len(b) == 0 {
				continue
			}
			// Trimming the trailing newline character
			b = b[0 : len(b)-1]
			writeBatch(b)
			// Swap in a fresh buffer: the old backing array is still
			// referenced by the in-flight write and must not be reused.
			var temp bytes.Buffer
			buf = temp
		}
	}
	// Write out any remaining points
	b := buf.Bytes()
	if len(b) > 0 {
		writeBatch(b)
	}
	wg.Wait()
	return nil
}
// post POSTs data to url, drains and closes the response body, and treats
// any status other than 200 or 204 as an error whose message is the body.
// Note the returned *http.Response has an already-closed body.
func post(url string, datatype string, data io.Reader) (*http.Response, error) {
	resp, err := http.Post(url, datatype, data)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	accepted := resp.StatusCode == http.StatusNoContent || resp.StatusCode == http.StatusOK
	if !accepted {
		return nil, errors.New(string(body))
	}
	return resp, nil
}
// send POSTs the payload b to the client's current round-robin address,
// timing the request, and wraps the outcome in a response value.
func (c *BasicClient) send(b []byte) (response, error) {
	timer := NewTimer()
	resp, err := post(c.Addresses[c.addrId], "", bytes.NewBuffer(b))
	timer.StopTimer()
	if err != nil {
		return response{Timer: timer}, err
	}
	return response{
		Resp:  resp,
		Time:  time.Now(),
		Timer: timer,
	}, nil
}
// BasicQuery implements the QueryGenerator interface: it expands a single
// query template QueryCount times with an incrementing index.
type BasicQuery struct {
	Template   Query `toml:"template"`
	QueryCount int   `toml:"query_count"`
	// time is internal state set via SetTime; not read here.
	time time.Time
}

// QueryGenerate returns an unbuffered channel fed by a goroutine that
// emits QueryCount queries, substituting 0..QueryCount-1 into the template.
func (q *BasicQuery) QueryGenerate(now func() time.Time) (<-chan Query, error) {
	out := make(chan Query)
	go func() {
		defer close(out)
		for i := 0; i < q.QueryCount; i++ {
			out <- Query(fmt.Sprintf(string(q.Template), i))
		}
	}()
	return out, nil
}

// SetTime stores t as the generator's internal timestamp.
func (q *BasicQuery) SetTime(t time.Time) {
	q.time = t
}
// BasicQueryClient implements the QueryClient interface: it executes
// generated queries against one or more InfluxDB instances.
type BasicQueryClient struct {
	Enabled       bool     `toml:"enabled"`   // when false, Exec is a no-op
	Addresses     []string `toml:"addresses"` // host:port targets, round-robined
	Database      string   `toml:"database"`
	QueryInterval string   `toml:"query_interval"` // duration string slept between queries
	Concurrency   int      `toml:"concurrency"`
	// clients holds one HTTP client per address, built by Init.
	clients []client.Client
	// addrId indexes Addresses/clients for round-robin selection.
	addrId int
}
// Init builds one InfluxDB HTTP client per configured address, stopping at
// the first construction error.
func (b *BasicQueryClient) Init() error {
	for _, addr := range b.Addresses {
		httpClient, err := client.NewHTTPClient(client.HTTPConfig{
			Addr: fmt.Sprintf("http://%v", addr),
		})
		if err != nil {
			return err
		}
		b.clients = append(b.clients, httpClient)
	}
	return nil
}
// Query executes cmd against the current round-robin client and times it.
// The server's payload is discarded; only the timing survives.
func (b *BasicQueryClient) Query(cmd Query) (response, error) {
	timer := NewTimer()
	_, err := b.clients[b.addrId].Query(client.Query{
		Command:  string(cmd),
		Database: b.Database,
	})
	timer.StopTimer()
	if err != nil {
		return response{Timer: timer}, err
	}
	// Needs actual response type
	return response{
		Time:  time.Now(),
		Timer: timer,
	}, nil
}
// Exec consumes queries from qs, executes each against a round-robin
// target, and reports the timed result on r. Returns once qs is closed
// and all queries have completed.
func (b *BasicQueryClient) Exec(qs <-chan Query, r chan<- response) error {
	if !b.Enabled {
		return nil
	}
	var wg sync.WaitGroup
	counter := NewConcurrencyLimiter(b.Concurrency)
	// Fix: the error from Init was previously discarded; a bad address
	// would surface later as a missing client instead of a clean error.
	if err := b.Init(); err != nil {
		return err
	}
	interval, err := time.ParseDuration(b.QueryInterval)
	if err != nil {
		return err
	}
	ctr := 0
	for q := range qs {
		b.addrId = ctr % len(b.Addresses)
		ctr++
		wg.Add(1)
		counter.Increment()
		// NOTE(review): this closure runs synchronously (no `go`), so
		// queries execute one at a time despite the concurrency limiter —
		// kept as-is to preserve behavior.
		func(q Query) {
			defer wg.Done()
			qr, _ := b.Query(q)
			r <- qr
			time.Sleep(interval)
			counter.Decrement()
		}(q)
	}
	wg.Wait()
	return nil
}
// resetDB drops and then re-creates the named database on an existing
// InfluxDB instance.
// NOTE(review): if the database does not yet exist, the DROP may error and
// the CREATE is then never reached — confirm against the target server's
// behavior; the sole caller (Provision) ignores the returned error.
func resetDB(c client.Client, database string) error {
	_, err := c.Query(client.Query{
		Command: fmt.Sprintf("DROP DATABASE %s", database),
	})
	if err != nil {
		return err
	}
	_, err = c.Query(client.Query{
		Command: fmt.Sprintf("CREATE DATABASE %s", database),
	})
	return err
}
// BasicProvisioner implements the Provisioner interface: it prepares the
// target instance by (optionally) dropping and recreating the database.
type BasicProvisioner struct {
	Enabled       bool   `toml:"enabled"`        // when false, Provision is a no-op
	Address       string `toml:"address"`        // host:port of the instance to provision
	Database      string `toml:"database"`       // database to create/delete
	ResetDatabase bool   `toml:"reset_database"` // attempt the drop/create cycle
}
// Provision connects to the configured instance and, when ResetDatabase is
// set, drops and recreates the stress database via resetDB.
func (b *BasicProvisioner) Provision() error {
	if !b.Enabled {
		return nil
	}
	cl, err := client.NewHTTPClient(client.HTTPConfig{
		Addr: fmt.Sprintf("http://%v", b.Address),
	})
	if err != nil {
		return err
	}
	if b.ResetDatabase {
		// The error is ignored — presumably best-effort so a failing DROP
		// (e.g. database does not exist yet) does not abort the first run;
		// verify before tightening this.
		resetDB(cl, b.Database)
	}
	return nil
}
// BroadcastChannel fans a stream of responses out to every registered
// handler, each on its own channel and goroutine.
type BroadcastChannel struct {
	chs []chan response    // one channel per registered handler
	wg  sync.WaitGroup     // tracks in-flight broadcast sends
	fns []func(t *Timer)   // deferred handler launchers, started by Handle
}

// NewBroadcastChannel returns an empty BroadcastChannel.
func NewBroadcastChannel() *BroadcastChannel {
	return &BroadcastChannel{chs: make([]chan response, 0)}
}

// Register adds fn as a subscriber: it gets a dedicated channel now and is
// launched on its own goroutine when Handle starts.
func (b *BroadcastChannel) Register(fn responseHandler) {
	ch := make(chan response)
	b.chs = append(b.chs, ch)
	b.fns = append(b.fns, func(t *Timer) {
		go fn(ch, t)
	})
}
// Broadcast delivers r to every registered handler channel, one goroutine
// per channel. The outer Add/Done pair keeps the WaitGroup counter nonzero
// for the whole dispatch so Close's Wait cannot slip between the per-send
// Adds.
func (b *BroadcastChannel) Broadcast(r response) {
	b.wg.Add(1)
	for _, ch := range b.chs {
		b.wg.Add(1)
		go func(ch chan response) {
			ch <- r
			b.wg.Done()
		}(ch)
	}
	b.wg.Done()
}

// Close waits for all in-flight broadcasts, then closes every handler
// channel so the handler goroutines can drain and exit.
func (b *BroadcastChannel) Close() {
	b.wg.Wait()
	for _, ch := range b.chs {
		close(ch)
		// Workaround — gives each handler time to finish before the next
		// channel closes; NOTE(review): a per-handler done signal would be
		// deterministic, the sleep is a race papered over.
		time.Sleep(1 * time.Second)
	}
}

// Handle is a responseHandler: it launches every registered handler, fans
// each incoming response out to all of them, then shuts everything down.
func (b *BroadcastChannel) Handle(rs <-chan response, t *Timer) {
	// Start all of the handlers
	for _, fn := range b.fns {
		fn(t)
	}
	for i := range rs {
		b.Broadcast(i)
	}
	b.Close()
}
// BasicWriteHandler drains the write-response stream, tallying successes
// and failures, then prints a summary (request counts, mean response time,
// and points/second derived from the overall write timer). Prints nothing
// when no responses arrived.
func (b *BasicClient) BasicWriteHandler(rs <-chan response, wt *Timer) {
	var total, succeeded, failed int
	var elapsed time.Duration
	for resp := range rs {
		total++
		if resp.Success() {
			succeeded++
		} else {
			failed++
		}
		elapsed += resp.Timer.Elapsed()
	}
	if total == 0 {
		return
	}
	fmt.Printf("Total Requests: %v\n", total)
	fmt.Printf(" Success: %v\n", succeeded)
	fmt.Printf(" Fail: %v\n", failed)
	fmt.Printf("Average Response Time: %v\n", elapsed/time.Duration(total))
	fmt.Printf("Points Per Second: %v\n\n", int(float64(total)*float64(b.BatchSize)/float64(wt.Elapsed().Seconds())))
}
// BasicReadHandler drains the query-response stream and prints the total
// query count and mean response time. Prints nothing when no responses
// arrived.
func (b *BasicQueryClient) BasicReadHandler(r <-chan response, rt *Timer) {
	var count int
	var elapsed time.Duration
	for resp := range r {
		count++
		elapsed += resp.Timer.Elapsed()
	}
	if count == 0 {
		return
	}
	fmt.Printf("Total Queries: %v\n", count)
	fmt.Printf("Average Query Response Time: %v\n\n", elapsed/time.Duration(count))
}
// HTTPHandler returns a responseHandler that persists observed response
// times into the InfluxDB instance described by the outputConfig, tagged
// with method ("write" or "read") plus the configured static tags, flushing
// in batches of 1000 points.
// NOTE(review): every error here (client/point creation, both Writes) is
// silently discarded, so lost measurements are invisible — verify that is
// acceptable for this tool.
func (o *outputConfig) HTTPHandler(method string) func(r <-chan response, rt *Timer) {
	return func(r <-chan response, rt *Timer) {
		c, _ := client.NewHTTPClient(client.HTTPConfig{
			Addr: o.addr,
		})
		bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
			Database:        o.database,
			RetentionPolicy: o.retentionPolicy,
			Precision:       "ns",
		})
		for p := range r {
			// Copy the static tags and stamp the method on each point.
			tags := make(map[string]string, len(o.tags))
			for k, v := range o.tags {
				tags[k] = v
			}
			tags["method"] = method
			fields := map[string]interface{}{
				"response_time": float64(p.Timer.Elapsed()),
			}
			pt, _ := client.NewPoint("performance", tags, fields, p.Time)
			bp.AddPoint(pt)
			// Flush every 1000 points, then start a fresh batch.
			if len(bp.Points())%1000 == 0 && len(bp.Points()) != 0 {
				c.Write(bp)
				bp, _ = client.NewBatchPoints(client.BatchPointsConfig{
					Database:        o.database,
					RetentionPolicy: o.retentionPolicy,
					Precision:       "ns",
				})
			}
		}
		// Flush any final partial batch.
		if len(bp.Points()) != 0 {
			c.Write(bp)
		}
	}
}

View File

@ -1,145 +0,0 @@
package stress
import (
"flag"
"fmt"
"strings"
"github.com/BurntSushi/toml"
)
// Config is the top-level stress test configuration, mirroring the three
// sections of the TOML file: [provision], [write], and [read].
type Config struct {
	Provision Provision `toml:"provision"`
	Write     Write     `toml:"write"`
	Read      Read      `toml:"read"`
}

// Provision contains the configuration parameters for all implemented
// Provisioner's ([provision.basic]).
type Provision struct {
	Basic BasicProvisioner `toml:"basic"`
}

// Write contains the configuration parameters for the stress test Writer:
// what points to generate and which client sends them.
type Write struct {
	PointGenerators PointGenerators `toml:"point_generator"`
	InfluxClients   InfluxClients   `toml:"influx_client"`
}

// PointGenerators contains the configuration parameters for all
// implemented PointGenerator's.
type PointGenerators struct {
	Basic *BasicPointGenerator `toml:"basic"`
}

// InfluxClients contains the configuration parameters for all implemented
// InfluxClient's.
type InfluxClients struct {
	Basic BasicClient `toml:"basic"`
}

// Read contains the configuration parameters for the stress test Reader:
// what queries to generate and which client executes them.
type Read struct {
	QueryGenerators QueryGenerators `toml:"query_generator"`
	QueryClients    QueryClients    `toml:"query_client"`
}

// QueryGenerators contains the configuration parameters for all
// implemented QueryGenerator's.
type QueryGenerators struct {
	Basic BasicQuery `toml:"basic"`
}

// QueryClients contains the configuration parameters for all implemented
// QueryClient's.
type QueryClients struct {
	Basic BasicQueryClient `toml:"basic"`
}
// NewConfig returns the stress configuration: the built-in default when s
// is empty, otherwise the decoded TOML file at path s.
func NewConfig(s string) (*Config, error) {
	if s == "" {
		return BasicStress()
	}
	return DecodeFile(s)
}
// DecodeFile decodes the TOML config file at path s into a fresh Config.
func DecodeFile(s string) (*Config, error) {
	cfg := &Config{}
	if _, err := toml.DecodeFile(s, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// DecodeConfig decodes TOML configuration passed directly as the string s
// (not a path) into a fresh Config.
func DecodeConfig(s string) (*Config, error) {
	cfg := &Config{}
	if _, err := toml.Decode(s, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
type outputConfig struct {
tags map[string]string
addr string
database string
retentionPolicy string
}
func (t *outputConfig) SetParams(addr, db, rp string) {
t.addr = addr
t.database = db
t.retentionPolicy = rp
}
// NewOutputConfig registers the reporting command-line flags
// (-database, -retention-policy, -addr, -tags), parses the process
// command line, and returns the resulting outputConfig.
//
// NOTE(review): this calls flag.Parse on the global flag set, so it
// must run after all other flags in the process are registered.
func NewOutputConfig() *outputConfig {
	var o outputConfig
	tags := make(map[string]string)
	o.tags = tags
	// Defaults mirror the README: report into the "stress" database
	// of a local InfluxDB unless overridden.
	database := flag.String("database", "stress", "name of database where the response times will persist")
	retentionPolicy := flag.String("retention-policy", "", "name of the retention policy where the response times will persist")
	address := flag.String("addr", "http://localhost:8086", "IP address and port of database where response times will persist (e.g., localhost:8086)")
	// o implements flag.Value (String/Set below), collecting k=v tags.
	flag.Var(&o, "tags", "A comma separated list of tags")
	flag.Parse()

	o.SetParams(*address, *database, *retentionPolicy)

	return &o
}
// String renders the config as "k=v ... database retentionPolicy addr",
// satisfying the flag.Value interface. Tag order follows Go's random
// map iteration order.
func (t *outputConfig) String() string {
	var b strings.Builder
	for k, v := range t.tags {
		fmt.Fprintf(&b, "%v=%v ", k, v)
	}
	return fmt.Sprintf("%v %v %v %v", b.String(), t.database, t.retentionPolicy, t.addr)
}
// Set parses a comma separated list of key=value pairs (e.g.
// "region=us-west,host=server1") and merges them into t.tags,
// satisfying the flag.Value interface.
//
// Fixes: entries without an '=' previously caused an index-out-of-range
// panic; they now return an error. Values containing '=' were silently
// truncated by Split; SplitN keeps the full value.
func (t *outputConfig) Set(value string) error {
	for _, s := range strings.Split(value, ",") {
		kv := strings.SplitN(s, "=", 2)
		if len(kv) != 2 {
			return fmt.Errorf("invalid tag %q: expected key=value", s)
		}
		t.tags[kv[0]] = kv[1]
	}
	return nil
}

View File

@ -1,335 +0,0 @@
package stress // import "github.com/influxdata/influxdb/stress"
import (
"bytes"
"fmt"
"net/http"
"sync"
"time"
)
// Point is an interface that is used to represent
// the abstract idea of a point in InfluxDB. Each method renders the
// point in one wire format; only Line (line protocol) is fully
// implemented by StdPoint — the others are placeholders.
type Point interface {
	Line() []byte
	Graphite() []byte
	OpenJSON() []byte
	OpenTelnet() []byte
}
///////////////////////////////////////////////////
// Example Implementation of the Point Interface //
///////////////////////////////////////////////////
// KeyValue is an intermediate type that is used
// to express Tag and Field similarly: both are just
// string key/value pairs at this level.
type KeyValue struct {
	Key   string
	Value string
}
// Tag is a struct for a tag in influxdb.
type Tag KeyValue

// Field is a struct for a field in influxdb.
type Field KeyValue

// Tags is a slice of all the tags for a point.
type Tags []Tag

// Fields is a slice of all the fields for a point.
type Fields []Field
// tagset returns a byte array for a points tagset, rendered as
// "k1=v1,k2=v2" for the line-protocol tag section.
//
// Fixes: an empty Tags slice previously panicked when trimming the
// trailing comma (b[0:len(b)-1] on a zero-length slice); it now
// returns nil.
func (t Tags) tagset() []byte {
	if len(t) == 0 {
		return nil
	}

	var buf bytes.Buffer
	for _, tag := range t {
		fmt.Fprintf(&buf, "%v=%v,", tag.Key, tag.Value)
	}

	b := buf.Bytes()
	return b[:len(b)-1] // drop the trailing comma
}
// fieldset returns a byte array for a points fieldset, rendered as
// "k1=v1,k2=v2" for the line-protocol field section.
//
// Fixes: an empty Fields slice previously panicked when trimming the
// trailing comma (b[0:len(b)-1] on a zero-length slice); it now
// returns nil.
func (f Fields) fieldset() []byte {
	if len(f) == 0 {
		return nil
	}

	var buf bytes.Buffer
	for _, field := range f {
		fmt.Fprintf(&buf, "%v=%v,", field.Key, field.Value)
	}

	b := buf.Bytes()
	return b[:len(b)-1] // drop the trailing comma
}
// StdPoint represents a point in InfluxDB: a measurement name,
// its tag and field sets, and a timestamp (units are whatever the
// configured precision implies — not fixed here).
type StdPoint struct {
	Measurement string
	Tags        Tags
	Fields      Fields
	Timestamp   int64
}
// Line returns a byte array for a point in line-protocol format:
// "measurement,tagset fieldset timestamp".
func (p StdPoint) Line() []byte {
	var buf bytes.Buffer

	fmt.Fprintf(&buf, "%v,", p.Measurement)
	buf.Write(p.Tags.tagset())
	buf.WriteString(" ")
	buf.Write(p.Fields.fieldset())
	buf.WriteString(" ")
	fmt.Fprintf(&buf, "%v", p.Timestamp)

	return buf.Bytes()
}
// Graphite returns a byte array for a point
// in graphite-protocol format.
//
// TODO: implement. The current body returns a fixed placeholder
// string, not real data (the "acutal_value" typo is part of that
// placeholder literal and is left untouched).
func (p StdPoint) Graphite() []byte {
	// TODO: implement
	// timestamp is at second level resolution
	// but can be specified as a float to get nanosecond
	// level precision
	t := "tag_1.tag_2.measurement[.field] acutal_value timestamp"
	return []byte(t)
}
// OpenJSON returns a byte array for a point
// in OpenTSDB JSON format.
//
// TODO: implement. Currently returns a fixed placeholder ("hello");
// the commented-out JSON below sketches the intended output shape.
func (p StdPoint) OpenJSON() []byte {
	// TODO: implement
	//[
	//    {
	//        "metric": "sys.cpu.nice",
	//        "timestamp": 1346846400,
	//        "value": 18,
	//        "tags": {
	//           "host": "web01",
	//           "dc": "lga"
	//        }
	//    },
	//    {
	//        "metric": "sys.cpu.nice",
	//        "timestamp": 1346846400,
	//        "value": 9,
	//        "tags": {
	//           "host": "web02",
	//           "dc": "lga"
	//        }
	//    }
	//]
	return []byte("hello")
}
// OpenTelnet returns a byte array for a point
// in OpenTSDB-telnet format.
//
// TODO: implement. Currently returns a fixed placeholder ("hello").
func (p StdPoint) OpenTelnet() []byte {
	// TODO: implement
	// timestamp can be 13 digits at most
	// sys.cpu.nice timestamp value tag_key_1=tag_value_1 tag_key_2=tag_value_2
	return []byte("hello")
}
////////////////////////////////////////
// response is the result of making a request to influxdb:
// the raw HTTP response (nil if the request failed outright),
// the wall-clock time it completed, and a Timer covering the call.
type response struct {
	Resp  *http.Response
	Time  time.Time
	Timer *Timer
}
// Success returns true if the request produced an HTTP 204
// (No Content) response — the status InfluxDB returns for a
// successful write — and false otherwise.
func (r response) Success() bool {
	// ADD success for tcp, udp, etc
	return r.Resp != nil && r.Resp.StatusCode == 204
}
// WriteResponse is a response for a Writer.
type WriteResponse response

// QueryResponse is a response for a Querier; it additionally
// carries the response body text.
type QueryResponse struct {
	response
	Body string
}
///////////////////////////////
// Definition of the Writer ///
///////////////////////////////
// PointGenerator is an interface for generating points.
// Generate returns a channel from which points are consumed;
// Time reports a generator-defined reference time (used by the
// query side via StressTest.Start).
type PointGenerator interface {
	Generate() (<-chan Point, error)
	Time() time.Time
}
// InfluxClient is an interface for writing data to the database.
// Batch consumes points, groups them, and reports each write's
// outcome on r; send transmits one raw payload.
type InfluxClient interface {
	Batch(ps <-chan Point, r chan<- response) error
	send(b []byte) (response, error)
	//ResponseHandler
}
// Writer is a PointGenerator and an InfluxClient,
// combined by embedding so it both produces and writes points.
type Writer struct {
	PointGenerator
	InfluxClient
}
// NewWriter couples a PointGenerator with an InfluxClient
// into a Writer.
func NewWriter(p PointGenerator, i InfluxClient) Writer {
	return Writer{
		PointGenerator: p,
		InfluxClient:   i,
	}
}
////////////////////////////////
// Definition of the Querier ///
////////////////////////////////
// Query is the text of a single InfluxQL query to run against
// the target instance.
type Query string
// QueryGenerator is an interface that is used
// to define queries that will be ran on the DB.
// QueryGenerate receives a time source (typically the writer's
// Time method) so queries can reference written data.
type QueryGenerator interface {
	QueryGenerate(f func() time.Time) (<-chan Query, error)
	SetTime(t time.Time)
}
// QueryClient is an interface that can write a query
// to an InfluxDB instance. Query runs a single query;
// Exec drains qs and reports each outcome on r.
type QueryClient interface {
	Query(q Query) (response, error)
	Exec(qs <-chan Query, r chan<- response) error
}
// Querier queries the database: a QueryGenerator to produce
// queries and a QueryClient to execute them, combined by embedding.
type Querier struct {
	QueryGenerator
	QueryClient
}
// NewQuerier couples a QueryGenerator with a QueryClient
// into a Querier.
func NewQuerier(q QueryGenerator, c QueryClient) Querier {
	return Querier{
		QueryGenerator: q,
		QueryClient:    c,
	}
}
///////////////////////////////////
// Definition of the Provisioner //
///////////////////////////////////
// Provisioner is an interface that provisions an
// InfluxDB instance (e.g. creating/resetting the target database)
// before the test workload starts.
type Provisioner interface {
	Provision() error
}
/////////////////////////////////
// Definition of StressTest /////
/////////////////////////////////
// StressTest is a struct that contains all of
// the logic required to execute a Stress Test:
// a Provisioner to prepare the instance, plus the
// write and query workloads, combined by embedding.
type StressTest struct {
	Provisioner
	Writer
	Querier
}
// responseHandler consumes a stream of responses from one workload
// and is given that workload's Timer for reporting elapsed time.
type responseHandler func(r <-chan response, t *Timer)
// Start executes the Stress Test: it provisions the target
// instance, then runs the write workload and the query workload
// concurrently, handing each workload's response stream and timer
// to its handler. Start blocks until both workloads and handlers
// have finished.
//
// Fixes: the error from Provision was previously discarded; a failed
// provision now aborts the test instead of stressing a
// half-configured instance.
func (s *StressTest) Start(wHandle responseHandler, rHandle responseHandler) {
	var wg sync.WaitGroup

	// Provision the Instance
	if err := s.Provision(); err != nil {
		fmt.Println(err)
		return
	}

	wg.Add(1)
	// Starts Writing
	go func() {
		defer wg.Done()
		r := make(chan response)
		wt := NewTimer()

		// Producer goroutine: generate points and batch-write them,
		// closing r (and stopping the timer) when done so wHandle
		// terminates.
		go func() {
			defer wt.StopTimer()
			defer close(r)
			p, err := s.Generate()
			if err != nil {
				fmt.Println(err)
				return
			}

			err = s.Batch(p, r)
			if err != nil {
				fmt.Println(err)
				return
			}
		}()

		// Write Results Handler
		wHandle(r, wt)
	}()

	wg.Add(1)
	// Starts Querying
	go func() {
		defer wg.Done()
		r := make(chan response)
		rt := NewTimer()

		// Producer goroutine: generate queries (anchored to the
		// writer's time source) and execute them.
		go func() {
			defer rt.StopTimer()
			defer close(r)
			q, err := s.QueryGenerate(s.Time)
			if err != nil {
				fmt.Println(err)
				return
			}

			err = s.Exec(q, r)
			if err != nil {
				fmt.Println(err)
				return
			}
		}()

		// Read Results Handler
		rHandle(r, rt)
	}()

	wg.Wait()
}
// NewStressTest assembles a StressTest from a Provisioner and the
// write/query workloads.
func NewStressTest(p Provisioner, w Writer, r Querier) StressTest {
	return StressTest{
		Provisioner: p,
		Writer:      w,
		Querier:     r,
	}
}

View File

@ -1,54 +0,0 @@
[provision]
[provision.basic]
enabled = true
address = "localhost:8086"
database = "stress"
reset_database = true
[write]
[write.point_generator]
[write.point_generator.basic]
enabled = true
point_count = 100
series_count = 100000
tick = "10s"
jitter = true
measurement = "cpu"
start_date = "2006-Jan-02"
[[write.point_generator.basic.tag]]
key = "host"
value = "server"
[[write.point_generator.basic.tag]]
key = "location"
value = "us-west"
[[write.point_generator.basic.field]]
key = "value"
value = "float64"
[write.influx_client]
[write.influx_client.basic]
enabled = true
addresses = ["localhost:8086","localhost:1234","localhost:5678"] # stress_test_server runs on port 1234
database = "stress"
precision = "n"
batch_size = 10000
batch_interval = "0s"
concurrency = 10
ssl = false
format = "line_http" # line_udp, graphite_tcp, graphite_udp
[read]
[read.query_generator]
[read.query_generator.basic]
template = "SELECT count(value) FROM cpu where host='server-%v'"
query_count = 250
[read.query_client]
[read.query_client.basic]
enabled = true
addresses = ["localhost:8086"]
database = "stress"
query_interval = "100ms"
concurrency = 1

View File

@ -1,594 +0,0 @@
package stress
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"regexp"
"strings"
"testing"
"time"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/models"
)
// TestTimer_StartTimer verifies that StartTimer stamps the timer's
// start field with a non-zero time.
func TestTimer_StartTimer(t *testing.T) {
	var epoch time.Time

	tmr := &Timer{}
	tmr.StartTimer()
	s := tmr.Start()

	if s == epoch {
		t.Errorf("expected tmr.start to not be %v", s)
	}
}
// TestNewTimer verifies that NewTimer starts the timer immediately
// (non-zero start) while leaving the end field at its zero value.
func TestNewTimer(t *testing.T) {
	var epoch time.Time

	tmr := NewTimer()
	s := tmr.Start()

	if s == epoch {
		t.Errorf("expected tmr.start to not be %v", s)
	}

	e := tmr.End()
	if e != epoch {
		t.Errorf("expected tmr.stop to be %v, got %v", epoch, e)
	}
}
// TestTimer_StopTimer verifies that StopTimer stamps the timer's
// end field with a non-zero time.
func TestTimer_StopTimer(t *testing.T) {
	var epoch time.Time

	tmr := NewTimer()
	tmr.StopTimer()
	e := tmr.End()

	if e == epoch {
		t.Errorf("expected tmr.stop to not be %v", e)
	}
}
// TestTimer_Elapsed sleeps ~2s and checks Elapsed lands in a
// tolerant window (1.99s–3s) to absorb scheduler jitter.
func TestTimer_Elapsed(t *testing.T) {
	tmr := NewTimer()
	time.Sleep(2 * time.Second)
	tmr.StopTimer()
	e := tmr.Elapsed()

	if time.Duration(1990*time.Millisecond) > e || e > time.Duration(3*time.Second) {
		t.Errorf("expected around %s got %s", time.Duration(2*time.Second), e)
	}
}
/// basic.go
// Types are off
func Test_typeArr(t *testing.T) {
var re *regexp.Regexp
var b bool
arr := []string{
"float64",
"int",
"bool",
}
ts := typeArr(arr)
re = regexp.MustCompile(`\d+`)
b = re.MatchString(ts[0].(string))
if !b {
t.Errorf("Expected line protocol float64 got %v", ts[0])
}
re = regexp.MustCompile(`\d+i`)
b = re.MatchString(ts[1].(string))
if !b {
t.Errorf("Expected line protocol int got %v", ts[1])
}
re = regexp.MustCompile(`true|false`)
b = re.MatchString(ts[2].(string))
if !b {
t.Errorf("Expected line protocol bool got %v", ts[2])
}
}
func Test_typeArrBadTypes(t *testing.T) {
arr := []string{
"default",
"rand",
"",
}
ts := typeArr(arr)
for _, x := range ts {
re := regexp.MustCompile(`\d+`)
b := re.MatchString(x.(string))
if !b {
t.Errorf("Expected line protocol float64 got %v", x)
}
}
}
func TestPnt_Line(t *testing.T) {
p := &Pnt{}
b := []byte("a,b=1,c=1 v=1")
p.Set(b)
if string(p.Line()) != string(b) {
t.Errorf("Expected `%v` to `%v`", string(b), string(p.Line()))
}
}
func TestAbstractTags_Template(t *testing.T) {
tags := AbstractTags{
AbstractTag{
Key: "host",
Value: "server",
},
AbstractTag{
Key: "location",
Value: "us-west",
},
}
s := tags.Template()
tm := "host=server-%v,location=us-west"
if s != tm {
t.Errorf("Expected %v got %v", tm, s)
}
}
func TestAbstractFields_TemplateOneField(t *testing.T) {
fields := AbstractFields{
AbstractField{
Key: "fValue",
Type: "float64",
},
}
tm, _ := fields.Template()
s := "fValue=%v"
if s != tm {
t.Errorf("Expected `%v` got `%v`", s, tm)
}
}
func TestAbstractFields_TemplateManyFields(t *testing.T) {
fields := AbstractFields{
AbstractField{
Key: "fValue",
Type: "float64",
},
AbstractField{
Key: "iValue",
Type: "int",
},
AbstractField{
Key: "bValue",
Type: "bool",
},
AbstractField{
Key: "rValue",
Type: "rnd",
},
}
tm, ty := fields.Template()
s := "fValue=%v,iValue=%v,bValue=%v,rValue=%v"
if s != tm {
t.Errorf("Expected `%v` got `%v`", s, tm)
}
for i, f := range fields {
if f.Type != ty[i] {
t.Errorf("Expected %v got %v", f.Type, ty[i])
}
}
}
var basicPG = &BasicPointGenerator{
PointCount: 100,
Tick: "10s",
Measurement: "cpu",
SeriesCount: 100,
Tags: AbstractTags{
AbstractTag{
Key: "host",
Value: "server",
},
AbstractTag{
Key: "location",
Value: "us-west",
},
},
Fields: AbstractFields{
AbstractField{
Key: "value",
Type: "float64",
},
},
StartDate: "2006-Jan-01",
}
func TestBasicPointGenerator_Template(t *testing.T) {
fn := basicPG.Template()
now := time.Now()
m := "cpu,host=server-1,location=us-west"
ts := fmt.Sprintf("%v", now.UnixNano())
tm := strings.Split(string(fn(1, now).Line()), " ")
if m != tm[0] {
t.Errorf("Expected %s got %s", m, tm[0])
}
if !strings.HasPrefix(tm[1], "value=") {
t.Errorf("Expected %v to start with `value=`", tm[1])
}
if ts != string(tm[2]) {
t.Errorf("Expected %s got %s", ts, tm[2])
}
}
func TestBasicPointGenerator_Generate(t *testing.T) {
ps, err := basicPG.Generate()
if err != nil {
t.Error(err)
}
var buf bytes.Buffer
for p := range ps {
b := p.Line()
buf.Write(b)
buf.Write([]byte("\n"))
}
bs := buf.Bytes()
bs = bs[0 : len(bs)-1]
_, err = models.ParsePoints(bs)
if err != nil {
t.Error(err)
}
}
func Test_post(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
content, _ := ioutil.ReadAll(r.Body)
lines := strings.Split(string(content), "\n")
if len(lines) != 3 {
t.Errorf("Expected 3 lines got %v", len(lines))
}
w.WriteHeader(http.StatusOK)
}))
defer ts.Close()
b := []byte(
`cpu,host=server-1,location=us-west value=100 12932
cpu,host=server-2,location=us-west value=10 12932
cpu,host=server-3,location=us-west value=120 12932`,
)
_, err := post(ts.URL, "application/x-www-form-urlencoded", bytes.NewBuffer(b))
if err != nil {
t.Error(err)
}
}
var basicIC = &BasicClient{
Addresses: []string{"localhost:8086"},
Database: "stress",
Precision: "n",
BatchSize: 1000,
BatchInterval: "0s",
Concurrency: 10,
Format: "line_http",
}
func TestBasicClient_send(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
content, _ := ioutil.ReadAll(r.Body)
lines := strings.Split(string(content), "\n")
if len(lines) != 3 {
t.Errorf("Expected 3 lines got %v", len(lines))
}
w.WriteHeader(http.StatusOK)
}))
defer ts.Close()
basicIC.Addresses[0] = ts.URL
b := []byte(
`cpu,host=server-1,location=us-west value=100 12932
cpu,host=server-2,location=us-west value=10 12932
cpu,host=server-3,location=us-west value=120 12932`,
)
_, err := basicIC.send(b)
if err != nil {
t.Error(err)
}
}
func TestBasicClient_Batch(t *testing.T) {
c := make(chan Point)
r := make(chan response)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
content, _ := ioutil.ReadAll(r.Body)
lines := strings.Split(string(content), "\n")
if len(lines) != 1000 {
t.Errorf("Expected 1000 lines got %v", len(lines))
}
w.WriteHeader(http.StatusOK)
}))
defer ts.Close()
basicIC.Addresses[0] = ts.URL[7:]
go func(c chan Point) {
defer close(c)
for i := 0; i < 1000; i++ {
p := &Pnt{}
p.Next(i, time.Now())
c <- *p
}
}(c)
err := basicIC.Batch(c, r)
close(r)
if err != nil {
t.Error(err)
}
}
var basicQ = &BasicQuery{
Template: Query("SELECT count(value) from cpu WHERE host='server-%v'"),
QueryCount: 100,
}
func TestBasicQuery_QueryGenerate(t *testing.T) {
qs, _ := basicQ.QueryGenerate(time.Now)
i := 0
for q := range qs {
tm := fmt.Sprintf(string(basicQ.Template), i)
if Query(tm) != q {
t.Errorf("Expected %v to be %v", q, tm)
}
i++
}
}
var basicQC = &BasicQueryClient{
Addresses: []string{"localhost:8086"},
Database: "stress",
QueryInterval: "10s",
Concurrency: 1,
}
func TestBasicQueryClient_Query(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(50 * time.Millisecond)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Influxdb-Version", "x.x")
w.Header().Set("X-Influxdb-Build", "OSS")
var data client.Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
basicQC.Addresses[0] = ts.URL[7:]
basicQC.Init()
q := "SELECT count(value) FROM cpu"
r, err := basicQC.Query(Query(q))
if err != nil {
t.Error(err)
}
var epoch time.Time
if r.Time == epoch {
t.Errorf("Expected %v to not be epoch", r.Time)
}
elapsed := r.Timer.Elapsed()
if elapsed.Nanoseconds() == 0 {
t.Errorf("Expected %v to not be 0", elapsed.Nanoseconds())
}
}
/// config.go
func Test_NewConfigWithFile(t *testing.T) {
c, err := NewConfig("stress.toml")
if err != nil {
t.Error(err)
}
p := c.Provision
w := c.Write
r := c.Read
if p.Basic.Address != "localhost:8086" {
t.Errorf("Expected `localhost:8086` got %s", p.Basic.Address)
}
if p.Basic.Database != "stress" {
t.Errorf("Expected `stress` got %s", p.Basic.Database)
}
if !p.Basic.ResetDatabase {
t.Errorf("Expected true got %v", p.Basic.ResetDatabase)
}
pg := w.PointGenerators.Basic
if pg.PointCount != 100 {
t.Errorf("Expected 100 got %v", pg.PointCount)
}
if pg.SeriesCount != 100000 {
t.Errorf("Expected 100000 got %v", pg.SeriesCount)
}
if pg.Tick != "10s" {
t.Errorf("Expected 10s got %s", pg.Tick)
}
if pg.Measurement != "cpu" {
t.Errorf("Expected cpu got %s", pg.Measurement)
}
if pg.StartDate != "2006-Jan-02" {
t.Errorf("Expected `2006-Jan-02` got `%s`", pg.StartDate)
}
// TODO: Check tags
// TODO: Check fields
wc := w.InfluxClients.Basic
if wc.Addresses[0] != "localhost:8086" {
t.Errorf("Expected `localhost:8086` got %s", wc.Addresses[0])
}
if wc.Database != "stress" {
t.Errorf("Expected stress got %s", wc.Database)
}
if wc.Precision != "n" {
t.Errorf("Expected n got %s", wc.Precision)
}
if wc.BatchSize != 10000 {
t.Errorf("Expected 10000 got %v", wc.BatchSize)
}
if wc.BatchInterval != "0s" {
t.Errorf("Expected 0s got %v", wc.BatchInterval)
}
if wc.Concurrency != 10 {
t.Errorf("Expected 10 got %v", wc.Concurrency)
}
if wc.SSL {
t.Errorf("Expected true got %v", wc.SSL)
}
if wc.Format != "line_http" {
t.Errorf("Expected `line_http` got %s", wc.Format)
}
qg := r.QueryGenerators.Basic
if qg.Template != "SELECT count(value) FROM cpu where host='server-%v'" {
t.Errorf("Expected `SELECT count(value) FROM cpu where host='server-%%v'` got %s", qg.Template)
}
if qg.QueryCount != 250 {
t.Errorf("Expected 250 got %v", qg.QueryCount)
}
qc := r.QueryClients.Basic
if qc.Addresses[0] != "localhost:8086" {
t.Errorf("Expected `localhost:8086` got %s", qc.Addresses[0])
}
if qc.Database != "stress" {
t.Errorf("Expected stress got %s", qc.Database)
}
if qc.QueryInterval != "100ms" {
t.Errorf("Expected 100ms got %s", qc.QueryInterval)
}
if qc.Concurrency != 1 {
t.Errorf("Expected 1 got %v", qc.Concurrency)
}
}
func Test_NewConfigWithoutFile(t *testing.T) {
c, err := NewConfig("")
if err != nil {
t.Error(err)
}
p := c.Provision
w := c.Write
r := c.Read
if p.Basic.Address != "localhost:8086" {
t.Errorf("Expected `localhost:8086` got %s", p.Basic.Address)
}
if p.Basic.Database != "stress" {
t.Errorf("Expected `stress` got %s", p.Basic.Database)
}
if !p.Basic.ResetDatabase {
t.Errorf("Expected true got %v", p.Basic.ResetDatabase)
}
pg := w.PointGenerators.Basic
if pg.PointCount != 100 {
t.Errorf("Expected 100 got %v", pg.PointCount)
}
if pg.SeriesCount != 100000 {
t.Errorf("Expected 100000 got %v", pg.SeriesCount)
}
if pg.Tick != "10s" {
t.Errorf("Expected 10s got %s", pg.Tick)
}
if pg.Measurement != "cpu" {
t.Errorf("Expected cpu got %s", pg.Measurement)
}
if pg.StartDate != "2006-Jan-02" {
t.Errorf("Expected `2006-Jan-02` got `%s`", pg.StartDate)
}
// TODO: Check tags
// TODO: Check fields
wc := w.InfluxClients.Basic
if wc.Addresses[0] != "localhost:8086" {
t.Errorf("Expected `localhost:8086` got %s", wc.Addresses[0])
}
if wc.Database != "stress" {
t.Errorf("Expected stress got %s", wc.Database)
}
if wc.Precision != "n" {
t.Errorf("Expected n got %s", wc.Precision)
}
if wc.BatchSize != 5000 {
t.Errorf("Expected 5000 got %v", wc.BatchSize)
}
if wc.BatchInterval != "0s" {
t.Errorf("Expected 0s got %v", wc.BatchInterval)
}
if wc.Concurrency != 10 {
t.Errorf("Expected 10 got %v", wc.Concurrency)
}
if wc.SSL {
t.Errorf("Expected true got %v", wc.SSL)
}
if wc.Format != "line_http" {
t.Errorf("Expected `line_http` got %s", wc.Format)
}
qg := r.QueryGenerators.Basic
if qg.Template != "SELECT count(value) FROM cpu where host='server-%v'" {
t.Errorf("Expected `SELECT count(value) FROM cpu where host='server-%%v'` got %s", qg.Template)
}
if qg.QueryCount != 250 {
t.Errorf("Expected 250 got %v", qg.QueryCount)
}
qc := r.QueryClients.Basic
if qc.Addresses[0] != "localhost:8086" {
t.Errorf("Expected `localhost:8086` got %s", qc.Addresses[0])
}
if qc.Database != "stress" {
t.Errorf("Expected stress got %s", qc.Database)
}
if qc.QueryInterval != "100ms" {
t.Errorf("Expected 100ms got %s", qc.QueryInterval)
}
if qc.Concurrency != 1 {
t.Errorf("Expected 1 got %v", qc.Concurrency)
}
}
/// run.go
// TODO

View File

@ -1,74 +0,0 @@
package main
import (
"expvar"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
"github.com/paulbellamy/ratecounter"
)
var (
counter *ratecounter.RateCounter
hitspersecond = expvar.NewInt("hits_per_second")
mu sync.Mutex
m sync.Mutex
)
// Query handles the /query endpoint. It is a stub that returns a
// fixed body so clients pointed at this fake server get a response.
func Query(w http.ResponseWriter, req *http.Request) {
	io.WriteString(w, "du")
}
// Count handles /count endpoint
func Count(w http.ResponseWriter, req *http.Request) {
io.WriteString(w, fmt.Sprintf("%v", linecount))
}
var n int
var linecount int
// Write handles the /write endpoint of the fake InfluxDB server.
// It counts requests and received line-protocol lines, feeds the
// per-second rate counter, and always replies 204 No Content like a
// healthy write endpoint, logging running totals to stdout.
//
// Fixes: the request count and line count were printed from racy
// unlocked reads; both are now snapshotted under their locks. Also
// corrects the "Reqests" typo in the log output.
func Write(w http.ResponseWriter, req *http.Request) {
	mu.Lock()
	n++
	reqs := n // snapshot under the lock for the racy print below
	mu.Unlock()

	counter.Incr(1)
	hitspersecond.Set(counter.Rate())

	w.WriteHeader(http.StatusNoContent)

	fmt.Printf("Requests Per Second: %v\n", hitspersecond)
	fmt.Printf("Count: %v\n", reqs)

	content, _ := ioutil.ReadAll(req.Body)

	m.Lock()
	arr := strings.Split(string(content), "\n")
	linecount += len(arr)
	lc := linecount
	m.Unlock()
	fmt.Printf("Line Count: %v\n\n", lc)
}
// init zeroes the request/line counters and creates the one-second
// rate counter used by the Write handler.
func init() {
	n = 0
	linecount = 0
	counter = ratecounter.NewRateCounter(1 * time.Second)
}
// main wires up the fake-InfluxDB endpoints and serves them on
// port 1234 (the port the stress config's extra address points at).
//
// Fixes: http.ListenAndServe always returns a non-nil error; the
// previous code printed the bare string "Fatal" and dropped it.
func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/query", Query)
	mux.HandleFunc("/write", Write)
	mux.HandleFunc("/count", Count)

	if err := http.ListenAndServe(":1234", mux); err != nil {
		fmt.Println("Fatal:", err)
	}
}

View File

@ -1,64 +0,0 @@
package stress
var s = `
[provision]
[provision.basic]
enabled = true
address = "localhost:8086"
database = "stress"
reset_database = true
[write]
[write.point_generator]
[write.point_generator.basic]
enabled = true
point_count = 100
series_count = 100000
tick = "10s"
jitter = true
measurement = "cpu"
start_date = "2006-Jan-02"
precision = "n"
[[write.point_generator.basic.tag]]
key = "host"
value = "server"
[[write.point_generator.basic.tag]]
key = "location"
value = "us-west"
[[write.point_generator.basic.field]]
key = "value"
value = "float64"
[write.influx_client]
[write.influx_client.basic]
enabled = true
addresses = ["localhost:8086"]
database = "stress"
precision = "n"
batch_size = 5000
batch_interval = "0s"
concurrency = 10
ssl = false
format = "line_http" # line_udp, graphite_tcp, graphite_udp
[read]
[read.query_generator]
[read.query_generator.basic]
template = "SELECT count(value) FROM cpu where host='server-%v'"
query_count = 250
[read.query_client]
[read.query_client.basic]
enabled = true
addresses = ["localhost:8086"]
database = "stress"
query_interval = "100ms"
concurrency = 1
`
// BasicStress returns a config for a basic
// stress test.
func BasicStress() (*Config, error) {
return DecodeConfig(s)
}

View File

@ -1,132 +0,0 @@
package stress
import (
"time"
)
// Timer is a struct that can be used to track elapsed time
// between an explicit start and stop.
type Timer struct {
	start time.Time
	end   time.Time
}

// Start returns a Timers start field (zero until StartTimer runs).
func (t *Timer) Start() time.Time {
	return t.start
}

// End returns a Timers end field (zero until StopTimer runs).
func (t *Timer) End() time.Time {
	return t.end
}

// StartTimer sets a timers `start` field to the current time.
func (t *Timer) StartTimer() {
	t.start = time.Now()
}

// StopTimer sets a timers `end` field to the current time.
func (t *Timer) StopTimer() {
	t.end = time.Now()
}

// Elapsed returns the total elapsed time between the `start`
// and `end` fields on a timer. Meaningless (negative/zero) if
// StopTimer has not been called.
func (t *Timer) Elapsed() time.Duration {
	return t.end.Sub(t.start)
}

// NewTimer returns a pointer to a `Timer` struct where the
// timers `start` field has been set to `time.Now()`.
func NewTimer() *Timer {
	t := &Timer{}
	t.StartTimer()
	return t
}
// ResponseTime is a struct that contains a `Value`
// `Time` pairing: a measured value and the instant it was recorded.
type ResponseTime struct {
	Value int
	Time  time.Time
}

// NewResponseTime returns a new response time
// with value `v` and time `time.Now()`.
func NewResponseTime(v int) ResponseTime {
	r := ResponseTime{Value: v, Time: time.Now()}
	return r
}

// ResponseTimes is a slice of response times,
// sortable by Value via sort.Interface below.
type ResponseTimes []ResponseTime

// Len implements the `Len` method for the
// sort.Interface type.
func (rs ResponseTimes) Len() int {
	return len(rs)
}

// Less implements the `Less` method for the
// sort.Interface type, ordering by Value ascending.
func (rs ResponseTimes) Less(i, j int) bool {
	return rs[i].Value < rs[j].Value
}

// Swap implements the `Swap` method for the
// sort.Interface type.
func (rs ResponseTimes) Swap(i, j int) {
	rs[i], rs[j] = rs[j], rs[i]
}
//////////////////////////////////

// ConcurrencyLimiter is a goroutine-safe struct that can be used to
// ensure that no more than a specified max number of goroutines are
// executing. All state is owned by the handleLimits goroutine and
// reached only through the inc/dec channels.
type ConcurrencyLimiter struct {
	inc   chan chan struct{} // Increment requests; the inner chan is the caller's "go ahead" signal
	dec   chan struct{}      // Decrement tokens, buffered to max so Decrement never blocks
	max   int
	count int // current number of admitted goroutines; touched only by handleLimits
}

// NewConcurrencyLimiter returns a configured limiter that will
// ensure that calls to Increment will block if the max is hit.
func NewConcurrencyLimiter(max int) *ConcurrencyLimiter {
	c := &ConcurrencyLimiter{
		inc: make(chan chan struct{}),
		dec: make(chan struct{}, max),
		max: max,
	}
	go c.handleLimits()
	return c
}

// Increment will increase the count of running goroutines by 1.
// if the number is currently at the max, the call to Increment
// will block until another goroutine decrements.
func (c *ConcurrencyLimiter) Increment() {
	r := make(chan struct{})
	c.inc <- r
	<-r // wait for handleLimits to admit us
}

// Decrement will reduce the count of running goroutines by 1.
// The buffered channel makes this non-blocking up to max pending
// decrements.
func (c *ConcurrencyLimiter) Decrement() {
	c.dec <- struct{}{}
}

// handleLimits runs in a goroutine to manage the count of
// running goroutines. Decrements are only consumed when an
// Increment arrives at the limit, so `count` is an admission
// counter rather than a live count of running goroutines.
// NOTE(review): the handleLimits goroutine is never stopped;
// each limiter leaks one goroutine for the process lifetime.
func (c *ConcurrencyLimiter) handleLimits() {
	for {
		r := <-c.inc
		if c.count >= c.max {
			<-c.dec // block until someone frees a slot
			c.count--
		}
		c.count++
		r <- struct{}{}
	}
}

View File

@ -1,164 +0,0 @@
# Influx Stress tool -> `v2`
The new stress tool was designed to:
* have higher potential write throughput than previous version
* have more schema expressibility for testing different load profiles and professional services
* have more granular reporting to be better able to draw conclusions from tests
In service of these requirements we designed a language, closely resembling `influxql`, in which to express the new test commands. Instead of a configuration file, the new stress test takes a list of these `Statements`.
The tool has the following components:
* Parser - parses the configuration file and turns it into an `[]Statement`. All code related to the parser is in `v2/stressql/`. The parser was designed as per @benbjohnson's great article on [parsers in go](https://blog.gopheracademy.com/advent-2014/parsers-lexers/).
* Statements - perform operations on target instance or change test environment. All code related to statements is in `v2/statement/`. The following are the available statements:
- `EXEC` - Still a TODO, planned to run outside scripts from the config file.
- `GO` - Prepend to an `INSERT` or `QUERY` statement to run concurrently.
- `INFLUXQL` - All valid `influxql` will be passed directly to the targeted instance. Useful for setting up complex downsampling environments or just your testing environment.
- `INSERT` - Generates points following a template
- `QUERY` - Runs a given query or generates sample queries given a companion `INSERT` statement
- `SET` - Changes the test parameters. Defaults are listed in the `README.md`
- `WAIT` - Required after a `GO` statement. Blocks till all proceeding statements finish.
* Clients - The statement, results and InfluxDB clients. This code lives in `v2/stress_client`
- `StressTest` - The `Statement` client. Also contains the results client.
- `stressClient` - A performant InfluxDB client. Makes `GET /query` and `POST /write` requests. Forwards the results to the results client.
![Influx Stress Design](./influx_stress_v2.png)
### Statements
`Statement` is an interface defined in `v2/statement/statement.go`:
```go
type Statement interface {
Run(s *stressClient.StressTest)
Report(s *stressClient.StressTest) string
SetID(s string)
}
```
* `Run` prompts the statement to carry out its instructions. See the run functions of the various statements listed above for more information.
* `Report` retrieves and collates all recorded test data from the reporting InfluxDB instance.
* `SetID` gives the statement an ID. Used in the parser. Each `statementID` is an 8 character random string used for reporting.
### `Statement` -> `StressTest`
`Statement`s send `Package`s (queries or writes to the target database) or `Directives` (for changing test state) through the `StressTest` to the `stressClient` where they are processed.
```go
// v2/stress_client/package.go
// T is Query or Write
// StatementID is for reporting
type Package struct {
T Type
Body []byte
StatementID string
Tracer *Tracer
}
// v2/stress_client/directive.go
// Property is test state variable to change
// Value is the new value
type Directive struct {
Property string
Value string
Tracer *Tracer
}
```
The `Tracer` on both of these packages contains a `sync.WaitGroup` that prevents `Statement`s from returning before all their operations are finished. This `WaitGroup` is incremented in the `Run()` of the statement and decremented in `*StressTest.resultsListen()` after results are recorded in the database. This is well documented with inline comments. `Tracer`s also carry optional tags for reporting purposes.
```go
// v2/stress_client/tracer.go
type Tracer struct {
Tags map[string]string
sync.WaitGroup
}
```
### `StressTest`
The `StressTest` is the client for the statements through the `*StressTest.SendPackage()` and `*StressTest.SendDirective()` functions. It also contains some test state and the `ResultsClient`.
```go
type StressTest struct {
TestID string
TestName string
Precision string
StartDate string
BatchSize int
sync.WaitGroup
sync.Mutex
packageChan chan<- Package
directiveChan chan<- Directive
ResultsChan chan Response
communes map[string]*commune
ResultsClient influx.Client
}
```
### Reporting Client
The `ResultsClient` turns raw responses from InfluxDB into properly tagged points containing any relevant information for storage in another InfluxDB instance. The code for creating those points lives in `v2/stress_client/reporting.go`
### InfluxDB Instance (reporting)
This is `localhost:8086` by default. The results are currently stored in the `_stressTest` database.
### `stressClient`
An InfluxDB client designed for speed. `stressClient` also holds most test state.
```go
// v2/stress_client/stress_client.go
type stressClient struct {
testID string
// State for the Stress Test
addresses []string
precision string
startDate string
database string
wdelay string
qdelay string
// Channels from statements
packageChan <-chan Package
directiveChan <-chan Directive
// Response channel
responseChan chan<- Response
// Concurrency utilities
sync.WaitGroup
sync.Mutex
// Concurrency Limit for Writes and Reads
wconc int
qconc int
// Manage Read and Write concurrency separately
wc *ConcurrencyLimiter
rc *ConcurrencyLimiter
}
```
Code for handling the write path is in `v2/stress_client/stress_client_write.go` while the query path is in `v2/stress_client/stress_client_query.go`.
### InfluxDB Instance (stress test target)
The InfluxDB which is being put under stress.
### response data
`Response`s carry points from `stressClient` to the `ResultsClient`.
```go
// v2/stress_client/response.go
type Response struct {
Point *influx.Point
Tracer *Tracer
}
```

View File

@ -1,177 +0,0 @@
# Influx Stress Tool V2
```
$ influx_stress -v2 -config iql/file.iql
```
This stress tool works from list of InfluxQL-esque statements. The language has been extended to allow for some basic templating of fields, tags and measurements in both line protocol and query statements.
By default the test outputs a human readable report to `STDOUT` and records test statistics in an active installation of InfluxDB at `localhost:8086`.
To set state variables for the test such as the address of the Influx node use the following syntax:
```
# The values listed below are the default values for each of the parameters
# Pipe delineated list of addresses. For cluster: [192.168.0.10:8086|192.168.0.2:8086|192.168.0.3:8086]
# Queries and writes are round-robin to the configured addresses.
SET Addresses [localhost:8086]
# False (default) uses http, true uses https
SET SSL [false]
# Username for targeted influx server or cluster
SET Username []
# Password for targeted influx server or cluster
SET Password []
# Database to target for queries and writes. Works like the InfluxCLI USE
SET Database [stress]
# Precision for the data being written
# Only s and ns supported
SET Precision [s]
# Date the first written point will be timestamped
SET StartDate [2016-01-01]
# Size of batches to send to InfluxDB
SET BatchSize [5000]
# Time to wait between sending batches
SET WriteInterval [0s]
# Time to wait between sending queries
SET QueryInterval [0s]
# Number of concurrent writers
SET WriteConcurrency [15]
# Number of concurrent readers
SET QueryConcurrency [5]
```
The values in the example are also the defaults.
Valid line protocol will be forwarded right to the server making setting up your testing environment easy:
```
CREATE DATABASE thing
ALTER RETENTION POLICY default ON thing DURATION 1h REPLICATION 1
SET database [thing]
```
You can write points like this:
```
INSERT mockCpu
cpu,
host=server-[int inc(0) 10000],location=[string rand(8) 1000]
value=[float rand(1000) 0]
100000 10s
Explained:
# INSERT keyword kicks off the statement, next to it is the name of the statement for reporting and templated query generation
INSERT mockCpu
# Measurement
cpu,
# Tags - separated by commas. Tag values can be templates, mixed template and fixed values
host=server-[float rand(100) 10000],location=[int inc(0) 1000],fixed=[fix|fid|dor|pom|another_tag_value]
# Fields - separated by commas either templates, mixed template and fixed values
value=[float inc(0) 0]
# 'Timestamp' - Number of points to insert into this measurement and the amount of time between points
100000 10s
```
Each template contains 3 parts: a datatype (`str`, `float`, or `int`) a function which describes how the value changes between points: `inc(0)` is increasing and `rand(n)` is a random number between `0` and `n`. The last number is the number of unique values in the tag or field. `0` is unbounded. To make a tag or field hold a single constant value, set the number of unique values to `1`.
To run multiple insert statements at once:
```
GO INSERT devices
devices,
city=[str rand(8) 10],country=[str rand(8) 25],device_id=[str rand(10) 1000]
lat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]
10000000 10s
GO INSERT devices2
devices2,
city=[str rand(8) 10],country=[str rand(8) 25],device_id=[str rand(10) 1000]
lat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]
10000000 10s
WAIT
```
Fastest point generation and write load requires 3-4 running `GO INSERT` statements at a time.
You can run queries like this:
```
QUERY cpu
SELECT mean(value) FROM cpu WHERE host='server-1'
DO 1000
```
### Output:
Output for config file in this repo:
```
[√] "CREATE DATABASE thing" -> 1.806785ms
[√] "CREATE DATABASE thing2" -> 1.492504ms
SET Database = 'thing'
SET Precision = 's'
Go Write Statement: mockCpu
Points/Sec: 245997
Resp Time Average: 173.354445ms
Resp Time Standard Deviation: 123.80344ms
95th Percentile Write Response: 381.363503ms
Average Request Bytes: 276110
Successful Write Reqs: 20
Retries: 0
Go Query Statement: mockCpu
Resp Time Average: 3.140803ms
Resp Time Standard Deviation: 2.292328ms
95th Percentile Read Response: 5.915437ms
Query Resp Bytes Average: 16 bytes
Successful Queries: 10
WAIT -> 406.400059ms
SET DATABASE = 'thing2'
Go Write Statement: devices
Points/Sec: 163348
Resp Time Average: 132.553789ms
Resp Time Standard Deviation: 149.397972ms
95th Percentile Write Response: 567.987467ms
Average Request Bytes: 459999
Successful Write Reqs: 20
Retries: 0
Go Write Statement: devices2
Points/Sec: 160078
Resp Time Average: 133.303097ms
Resp Time Standard Deviation: 144.352404ms
95th Percentile Write Response: 560.565066ms
Average Request Bytes: 464999
Successful Write Reqs: 20
Retries: 0
Go Query Statement: fooName
Resp Time Average: 1.3307ms
Resp Time Standard Deviation: 640.249µs
95th Percentile Read Response: 2.668ms
Query Resp Bytes Average: 16 bytes
Successful Queries: 10
WAIT -> 624.585319ms
[√] "DROP DATABASE thing" -> 991.088464ms
[√] "DROP DATABASE thing2" -> 421.362831ms
```
### Next Steps:
##### Documentation
- Parser behavior and proper `.iql` syntax
- How the templated query generation works
- Collection of tested `.iql` files to simulate different loads
##### Performance
- `Commune`, a struct that enables templated query generation, blocks writes when used; its performance needs investigation.
- Templated query generation is currently in a quasi-working state. See the above point.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

View File

@ -1,13 +0,0 @@
CREATE DATABASE stress
GO INSERT cpu
cpu,
host=server-[int inc(0) 100000],location=us-west
value=[int rand(100) 0]
10000000 10s
GO QUERY cpu
SELECT count(value) FROM cpu WHERE %t
DO 250
WAIT

View File

@ -1,45 +0,0 @@
CREATE DATABASE thing
CREATE DATABASE thing2
SET Database [thing]
SET Precision [s]
GO INSERT mockCpu
cpu,
host=server-[float inc(0) 10000],loc=[us-west|us-east|eu-north]
value=[int inc(100) 0]
100000 10s
GO QUERY mockCpu
SELECT mean(value) FROM cpu WHERE host='server-1'
DO 10
WAIT
SET DATABASE [thing2]
GO INSERT devices
devices,
city=[str rand(8) 100],country=[str rand(8) 25],device_id=[str rand(10) 100]
lat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]
100000 10s
GO INSERT devices2
devices2,
city=[str rand(8) 100],country=[str rand(8) 25],device_id=[str rand(10) 100]
lat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]
100000 10s
GO QUERY fooName
SELECT count(temp) FROM devices WHERE temp > 30
DO 10
WAIT
DROP DATABASE thing
DROP DATABASE thing2
WAIT

View File

@ -1,59 +0,0 @@
package stress
import (
"fmt"
"log"
"time"
influx "github.com/influxdata/influxdb/client/v2"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
"github.com/influxdata/influxdb/stress/v2/stressql"
)
// RunStress takes a configFile and drives the full stress test it
// describes: statements are parsed, executed in order, the pending
// results batch is flushed, and a report per statement is printed.
func RunStress(file string) {
	// Create the stress-test client.
	test := stressClient.NewStressTest()

	// Turn the config file into runnable statements; a parse failure is fatal.
	statements, err := stressql.ParseStatements(file)
	if err != nil {
		log.Fatalf("Parsing Error\n error: %v\n", err)
	}

	// Execute every statement in order.
	for _, statement := range statements {
		statement.Run(test)
	}

	// Push a sentinel response through the pipeline and wait on it so
	// any buffered result points are flushed before reporting.
	flush := blankResponse()
	test.ResultsChan <- flush
	flush.Tracer.Wait()

	// Print the report for each statement.
	for _, statement := range statements {
		fmt.Println(statement.Report(test))
	}
}
// blankResponse builds a sentinel Response used to flush the results
// pipeline: a minimal one-field point plus a Tracer the caller can Wait
// on so the program does not return before results are recorded.
func blankResponse() stressClient.Response {
	// A point must carry at least one field.
	fields := map[string]interface{}{"done": true}

	pt, err := influx.NewPoint("done", make(map[string]string), fields, time.Now())
	if err != nil {
		log.Fatalf("Error creating blank response point\n error: %v\n", err)
	}

	// The tracer keeps the program alive until this response is handled.
	tr := stressClient.NewTracer(make(map[string]string))
	tr.Add(1)

	return stressClient.NewResponse(pt, tr)
}

View File

@ -1,32 +0,0 @@
package statement
import (
"time"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// ExecStatement runs outside scripts. This functionality is not built out.
// TODO: Wire up!
type ExecStatement struct {
	StatementID string        // unique ID assigned by the test runner
	Script      string        // path of the external script to execute
	runtime     time.Duration // wall-clock time recorded by the last Run call
}

// SetID satisfies the Statement Interface.
func (i *ExecStatement) SetID(s string) {
	i.StatementID = s
}

// Run satisfies the Statement Interface. The script is not actually
// executed yet (see TODO above); only the elapsed time of this stub is
// recorded, which is effectively zero.
func (i *ExecStatement) Run(s *stressClient.StressTest) {
	runtime := time.Now()
	i.runtime = time.Since(runtime)
}

// Report satisfies the Statement Interface. Exec statements currently
// produce no report output.
func (i *ExecStatement) Report(s *stressClient.StressTest) string {
	return ""
}

View File

@ -1,41 +0,0 @@
package statement
import (
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// TestExecSetID verifies that SetID overwrites the StatementID.
func TestExecSetID(t *testing.T) {
	e := newTestExec()
	newID := "oaijnifo"
	e.SetID(newID)
	if e.StatementID != newID {
		t.Errorf("Expected: %v\nGot: %v\n", newID, e.StatementID)
	}
}

// TestExecRun verifies that Run completes without panicking. The
// previous `if e == nil` check was vacuous — e is assigned from
// newTestExec() and can never be nil, so the branch could not fire.
// Run is a stub, so completing cleanly is all that can be asserted.
func TestExecRun(t *testing.T) {
	e := newTestExec()
	s, _, _ := stressClient.NewTestStressTest()
	e.Run(s)
}

// TestExecReport verifies that exec statements report an empty string.
func TestExecReport(t *testing.T) {
	e := newTestExec()
	s, _, _ := stressClient.NewTestStressTest()
	if rep := e.Report(s); rep != "" {
		t.Fail()
	}
}

// newTestExec returns a minimal ExecStatement fixture.
func newTestExec() *ExecStatement {
	return &ExecStatement{
		StatementID: "fooID",
		Script:      "fooscript.txt",
	}
}

View File

@ -1,176 +0,0 @@
package statement
import (
crypto "crypto/rand"
"fmt"
"math/rand"
)
// ################
// #   Function   #
// ################

// Function holds the information needed to generate values for
// templated points.
type Function struct {
	Type     string // value type to emit: "int", "float", or "str"
	Fn       string // generator name: "inc" (incrementing) or "rand" (random)
	Argument int    // start value for "inc", exclusive upper bound for "rand"
	Count    int    // number of unique values to cycle through; 0 = unbounded
}

// NewStringer creates a new Stringer for this Function. When Count is
// non-zero the generated values repeat in a fixed cycle of length
// Count; otherwise each fresh value is repeated once per series.
func (f *Function) NewStringer(series int) Stringer {
	var fn Stringer
	switch f.Type {
	case "int":
		fn = NewIntFunc(f.Fn, f.Argument)
	case "float":
		fn = NewFloatFunc(f.Fn, f.Argument)
	case "str":
		fn = NewStrFunc(f.Fn, f.Argument)
	default:
		fn = func() string { return "STRINGER ERROR" }
	}

	// f.Count is already an int; the previous int(f.Count) conversion
	// was redundant.
	if f.Count != 0 {
		return cycle(f.Count, fn)
	}

	return nTimes(series, fn)
}

// ################
// #  Stringers   #
// ################

// Stringers is a collection of Stringer.
type Stringers []Stringer

// Eval returns all the Stringer functions evaluated once, in order,
// with the supplied time function's value appended as the final element.
func (s Stringers) Eval(time func() int64) []interface{} {
	arr := make([]interface{}, len(s)+1)
	for i, st := range s {
		arr[i] = st()
	}
	arr[len(s)] = time()
	return arr
}

// Stringer is a function that returns a string.
type Stringer func() string

// randStr returns a generator of random hex strings. The string is
// built from n/2 random bytes hex-encoded, so the result has length n
// rounded down to an even number (an odd n yields n-1 characters).
func randStr(n int) func() string {
	return func() string {
		b := make([]byte, n/2)
		_, _ = crypto.Read(b)
		return fmt.Sprintf("%x", b)
	}
}

// NewStrFunc creates a new Stringer to create strings for templated
// writes. Only "rand" is supported for strings; any other generator
// name yields an error marker value.
func NewStrFunc(fn string, arg int) Stringer {
	switch fn {
	case "rand":
		return randStr(arg)
	default:
		return func() string { return "STR ERROR" }
	}
}

// randFloat returns a generator of random values in [0, n).
// NOTE(review): despite the name this emits integers (rand.Intn) —
// same as randInt but without the line-protocol "i" suffix.
func randFloat(n int) func() string {
	return func() string {
		return fmt.Sprintf("%v", rand.Intn(n))
	}
}

// incFloat returns a generator that counts up from n, one per call.
func incFloat(n int) func() string {
	i := n
	return func() string {
		s := fmt.Sprintf("%v", i)
		i++
		return s
	}
}

// NewFloatFunc creates a new Stringer to create float values for
// templated writes.
func NewFloatFunc(fn string, arg int) Stringer {
	switch fn {
	case "rand":
		return randFloat(arg)
	case "inc":
		return incFloat(arg)
	default:
		return func() string { return "FLOAT ERROR" }
	}
}

// randInt returns a generator of random line-protocol integers
// ("<v>i") in [0, n).
func randInt(n int) Stringer {
	return func() string {
		return fmt.Sprintf("%vi", rand.Intn(n))
	}
}

// incInt returns a generator of line-protocol integers counting up
// from n, one per call.
func incInt(n int) Stringer {
	i := n
	return func() string {
		s := fmt.Sprintf("%vi", i)
		i++
		return s
	}
}

// NewIntFunc creates a new Stringer to create int values for templated
// writes.
func NewIntFunc(fn string, arg int) Stringer {
	switch fn {
	case "rand":
		return randInt(arg)
	case "inc":
		return incInt(arg)
	default:
		return func() string { return "INT ERROR" }
	}
}

// nTimes will return the previous return value of a function n-many
// times before calling the function again.
func nTimes(n int, fn Stringer) Stringer {
	i := 0
	t := fn()
	return func() string {
		i++
		if i > n {
			t = fn()
			i = 1
		}
		return t
	}
}

// cycle lazily generates n values from fn (one new value per call until
// the cache is full), then repeats the cached values in order forever.
// n == 0 means unbounded: fn is returned unchanged.
func cycle(n int, fn Stringer) Stringer {
	if n == 0 {
		return fn
	}

	i := 0
	cache := make([]string, n)
	t := fn()
	cache[i] = t
	return func() string {
		i++
		if i < n {
			cache[i] = fn()
		}
		t = cache[(i-1)%n]
		return t
	}
}

View File

@ -1,139 +0,0 @@
package statement
import (
"testing"
)
// TestNewStrRandStringer verifies a "str rand" stringer emits strings
// of the configured length.
func TestNewStrRandStringer(t *testing.T) {
	function := newStrRandFunction()
	strRandStringer := function.NewStringer(10)
	s := strRandStringer()
	if len(s) != function.Argument {
		t.Errorf("Expected: %v\nGot: %v\n", function.Argument, len(s))
	}
}

// TestNewIntIncStringer verifies an "int inc" stringer starts at its
// seed, rendered with the line-protocol "i" suffix.
func TestNewIntIncStringer(t *testing.T) {
	function := newIntIncFunction()
	intIncStringer := function.NewStringer(10)
	s := intIncStringer()
	if s != "0i" {
		t.Errorf("Expected: 0i\nGot: %v\n", s)
	}
}

// TestNewIntRandStringer verifies an "int rand" stringer stays below
// its upper bound (the trailing "i" suffix is stripped before parsing).
func TestNewIntRandStringer(t *testing.T) {
	function := newIntRandFunction()
	intRandStringer := function.NewStringer(10)
	s := intRandStringer()
	if parseInt(s[:len(s)-1]) > function.Argument {
		t.Errorf("Expected value below: %v\nGot value: %v\n", function.Argument, s)
	}
}

// TestNewFloatIncStringer verifies a "float inc" stringer starts at
// its seed value.
func TestNewFloatIncStringer(t *testing.T) {
	function := newFloatIncFunction()
	floatIncStringer := function.NewStringer(10)
	s := floatIncStringer()
	if parseFloat(s) != function.Argument {
		t.Errorf("Expected value: %v\nGot: %v\n", function.Argument, s)
	}
}

// TestNewFloatRandStringer verifies a "float rand" stringer stays
// below its upper bound.
func TestNewFloatRandStringer(t *testing.T) {
	function := newFloatRandFunction()
	floatRandStringer := function.NewStringer(10)
	s := floatRandStringer()
	if parseFloat(s) > function.Argument {
		t.Errorf("Expected value below: %v\nGot value: %v\n", function.Argument, s)
	}
}

// TestStringersEval verifies Stringers.Eval runs each stringer once,
// in order, and appends the supplied time value as the last element.
func TestStringersEval(t *testing.T) {
	// Make the *Function(s)
	strRandFunction := newStrRandFunction()
	intIncFunction := newIntIncFunction()
	intRandFunction := newIntRandFunction()
	floatIncFunction := newFloatIncFunction()
	floatRandFunction := newFloatRandFunction()

	// Make the *Stringer(s)
	strRandStringer := strRandFunction.NewStringer(10)
	intIncStringer := intIncFunction.NewStringer(10)
	intRandStringer := intRandFunction.NewStringer(10)
	floatIncStringer := floatIncFunction.NewStringer(10)
	floatRandStringer := floatRandFunction.NewStringer(10)

	// Make the *Stringers
	stringers := Stringers([]Stringer{strRandStringer, intIncStringer, intRandStringer, floatIncStringer, floatRandStringer})

	// Spoof the Time function
	// Call *Stringers.Eval
	values := stringers.Eval(spoofTime)

	// Check the strRandFunction
	if len(values[0].(string)) != strRandFunction.Argument {
		t.Errorf("Expected: %v\nGot: %v\n", strRandFunction.Argument, len(values[0].(string)))
	}

	// Check the intIncFunction
	if values[1].(string) != "0i" {
		t.Errorf("Expected: 0i\nGot: %v\n", values[1].(string))
	}

	// Check the intRandFunction
	s := values[2].(string)
	if parseInt(s[:len(s)-1]) > intRandFunction.Argument {
		t.Errorf("Expected value below: %v\nGot value: %v\n", intRandFunction.Argument, s)
	}

	// Check the floatIncFunction
	if parseFloat(values[3].(string)) != floatIncFunction.Argument {
		t.Errorf("Expected value: %v\nGot: %v\n", floatIncFunction.Argument, values[3])
	}

	// Check the floatRandFunction
	if parseFloat(values[4].(string)) > floatRandFunction.Argument {
		t.Errorf("Expected value below: %v\nGot value: %v\n", floatRandFunction.Argument, values[4])
	}
}

// spoofTime is a fixed stand-in for the point-time generator.
func spoofTime() int64 {
	return int64(8)
}

// newStrRandFunction returns a "str rand" Function fixture: 8-char
// strings cycling through 1000 unique values.
func newStrRandFunction() *Function {
	return &Function{
		Type:     "str",
		Fn:       "rand",
		Argument: 8,
		Count:    1000,
	}
}

// newIntIncFunction returns an "int inc" Function fixture starting at
// 0 with unbounded unique values.
func newIntIncFunction() *Function {
	return &Function{
		Type:     "int",
		Fn:       "inc",
		Argument: 0,
		Count:    0,
	}
}

// newIntRandFunction returns an "int rand" Function fixture bounded at
// 100, cycling through 1000 unique values.
func newIntRandFunction() *Function {
	return &Function{
		Type:     "int",
		Fn:       "rand",
		Argument: 100,
		Count:    1000,
	}
}

// newFloatIncFunction returns a "float inc" Function fixture starting
// at 0, cycling through 1000 unique values.
func newFloatIncFunction() *Function {
	return &Function{
		Type:     "float",
		Fn:       "inc",
		Argument: 0,
		Count:    1000,
	}
}

// newFloatRandFunction returns a "float rand" Function fixture bounded
// at 100, cycling through 1000 unique values.
func newFloatRandFunction() *Function {
	return &Function{
		Type:     "float",
		Fn:       "rand",
		Argument: 100,
		Count:    1000,
	}
}

View File

@ -1,40 +0,0 @@
package statement
import (
"fmt"
"time"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// GoStatement is a Statement Implementation to allow other statements to be run concurrently
type GoStatement struct {
	Statement          // the wrapped statement to run in its own goroutine
	StatementID string // unique ID assigned by the test runner
}

// SetID satisfies the Statement Interface.
func (i *GoStatement) SetID(s string) {
	i.StatementID = s
}

// Run satisfies the Statement Interface. The wrapped statement runs in
// a goroutine tracked by the StressTest's embedded WaitGroup.
func (i *GoStatement) Run(s *stressClient.StressTest) {
	// TODO: remove
	// Delays query statements by one second — presumably so concurrent
	// inserts have data in flight before queries start; confirm before
	// removing.
	switch i.Statement.(type) {
	case *QueryStatement:
		time.Sleep(1 * time.Second)
	}

	s.Add(1)
	go func() {
		i.Statement.Run(s)
		s.Done()
	}()
}

// Report satisfies the Statement Interface. The wrapped statement's
// report is prefixed with "Go ".
func (i *GoStatement) Report(s *stressClient.StressTest) string {
	return fmt.Sprintf("Go %v", i.Statement.Report(s))
}

View File

@ -1,41 +0,0 @@
package statement
import (
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// TestGoSetID verifies that SetID overwrites the StatementID.
func TestGoSetID(t *testing.T) {
	e := newTestGo()
	newID := "oaijnifo"
	e.SetID(newID)
	if e.StatementID != newID {
		t.Errorf("Expected: %v\nGot: %v\n", newID, e.StatementID)
	}
}

// TestGoRun verifies that Run completes and the inner statement's
// goroutine finishes. The previous `if e == nil` check was vacuous — e
// is assigned from newTestGo() and can never be nil — and the test
// returned without waiting for the goroutine started by Run.
func TestGoRun(t *testing.T) {
	e := newTestGo()
	s, _, _ := stressClient.NewTestStressTest()
	e.Run(s)
	// Run does s.Add(1) and the goroutine calls s.Done(); wait for it.
	s.Wait()
}

// TestGoReport verifies the report is prefixed with "Go " (the wrapped
// ExecStatement stub reports an empty string).
func TestGoReport(t *testing.T) {
	e := newTestGo()
	s, _, _ := stressClient.NewTestStressTest()
	if report := e.Report(s); report != "Go " {
		t.Errorf("Expected: %v\nGot: %v\n", "Go ", report)
	}
}

// newTestGo returns a GoStatement wrapping a stub ExecStatement.
func newTestGo() *GoStatement {
	return &GoStatement{
		Statement:   newTestExec(),
		StatementID: "fooID",
	}
}

View File

@ -1,69 +0,0 @@
package statement
import (
"log"
"time"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// InfluxqlStatement is a Statement Implementation that allows statements that parse in InfluxQL to be passed directly to the target instance
type InfluxqlStatement struct {
	StatementID string               // unique ID assigned by the test runner
	Query       string               // raw InfluxQL forwarded verbatim
	Tracer      *stressClient.Tracer // blocks Run until the query is acknowledged
}

// tags returns the tags attached to recording points. InfluxQL
// statements currently record no extra tags.
func (i *InfluxqlStatement) tags() map[string]string {
	tags := make(map[string]string)
	return tags
}

// SetID satisfies the Statement Interface.
func (i *InfluxqlStatement) SetID(s string) {
	i.StatementID = s
}

// Run satisfies the Statement Interface. The query is sent to the
// target as-is and Run blocks until the response has been recorded.
func (i *InfluxqlStatement) Run(s *stressClient.StressTest) {
	// Set the tracer
	i.Tracer = stressClient.NewTracer(i.tags())

	// Make the Package
	p := stressClient.NewPackage(stressClient.Query, []byte(i.Query), i.StatementID, i.Tracer)

	// Increment the tracer
	i.Tracer.Add(1)

	// Send the Package
	s.SendPackage(p)

	// Wait for all operations to finish
	i.Tracer.Wait()
}

// Report satisfies the Statement Interface. It renders the single
// recorded response for this statement.
func (i *InfluxqlStatement) Report(s *stressClient.StressTest) (out string) {
	allData := s.GetStatementResults(i.StatementID, "query")

	// Guard against an empty result set, consistent with
	// QueryStatement.Report; previously allData[0] was indexed
	// unconditionally and could panic on an empty slice.
	if len(allData) == 0 || allData[0].Series == nil {
		log.Fatalf("No data returned for InfluxQL report\n Statement ID: %v\n", i.StatementID)
	}

	iqlr := &influxQlReport{
		statement: i.Query,
		columns:   allData[0].Series[0].Columns,
		values:    allData[0].Series[0].Values,
	}

	iqlr.responseTime = time.Duration(responseTimes(iqlr.columns, iqlr.values)[0].Value)

	// Exactly one success marker is expected for a single statement.
	switch countSuccesses(iqlr.columns, iqlr.values) {
	case 0:
		iqlr.success = false
	case 1:
		iqlr.success = true
	default:
		log.Fatal("Error fetching response for InfluxQL statement")
	}

	return iqlr.String()
}

View File

@ -1,44 +0,0 @@
package statement
import (
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// TestInfluxQlSetID verifies that SetID overwrites the StatementID.
func TestInfluxQlSetID(t *testing.T) {
	e := newTestInfluxQl()
	newID := "oaijnifo"
	e.SetID(newID)
	if e.StatementID != newID {
		t.Errorf("Expected: %v\nGot: %v\n", newID, e.StatementID)
	}
}

// TestInfluxQlRun verifies that Run forwards the query verbatim as a
// Query package carrying this statement's ID. The goroutine drains the
// package channel and acks each package so Run's Tracer.Wait returns.
func TestInfluxQlRun(t *testing.T) {
	e := newTestInfluxQl()
	s, packageCh, _ := stressClient.NewTestStressTest()
	go func() {
		for pkg := range packageCh {
			if pkg.T != stressClient.Query {
				t.Errorf("Expected package to be Query\nGot: %v", pkg.T)
			}
			if string(pkg.Body) != e.Query {
				t.Errorf("Expected query: %v\nGot: %v", e.Query, string(pkg.Body))
			}
			if pkg.StatementID != e.StatementID {
				t.Errorf("Expected statementID: %v\nGot: %v", e.StatementID, pkg.StatementID)
			}
			// Ack so the statement's Tracer.Wait() can return.
			pkg.Tracer.Done()
		}
	}()
	e.Run(s)
}

// newTestInfluxQl returns a minimal InfluxqlStatement fixture.
func newTestInfluxQl() *InfluxqlStatement {
	return &InfluxqlStatement{
		Query:       "CREATE DATABASE foo",
		Tracer:      stressClient.NewTracer(make(map[string]string)),
		StatementID: "fooID",
	}
}

View File

@ -1,214 +0,0 @@
package statement
import (
"bytes"
"fmt"
"log"
"strconv"
"strings"
"sync"
"time"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// InsertStatement is a Statement Implementation that creates points to be written to the target InfluxDB instance
type InsertStatement struct {
	TestID      string
	StatementID string

	// Statement Name
	Name string

	// Template string for points. Filled by the output of stringers
	TemplateString string

	// TagCount is used to find the number of series in the dataset
	TagCount int

	// The Tracer prevents InsertStatement.Run() from returning early
	Tracer *stressClient.Tracer

	// Timestamp holds the number of points to write and their precision
	Timestamp *Timestamp

	// Templates turn into stringers
	Templates Templates
	stringers Stringers

	// Number of series in this insert Statement
	series int

	// Returns the proper time for the next point
	time func() int64

	// Concurrency utilities
	sync.WaitGroup
	sync.Mutex

	// Timer for runtime and pps calculation
	runtime time.Duration
}

// tags returns the tags attached to this statement's recording points:
// field count, series count, and total points to write.
func (i *InsertStatement) tags() map[string]string {
	tags := map[string]string{
		"number_fields":       i.numFields(),
		"number_series":       fmtInt(i.series),
		"number_points_write": fmtInt(i.Timestamp.Count),
	}
	return tags
}

// SetID satisfies the Statement Interface.
func (i *InsertStatement) SetID(s string) {
	i.StatementID = s
}
// SetVars sets up the environment for InsertStatement to call its Run
// function: computes series cardinality, builds the stringers and the
// point clock, and registers a commune channel on the StressTest. It
// returns the commune channel that Run feeds sample points into.
func (i *InsertStatement) SetVars(s *stressClient.StressTest) chan<- string {
	// Set the #series at 1 to start
	i.series = 1

	// Num series is the product of the cardinality of the tags
	for _, tmpl := range i.Templates[0:i.TagCount] {
		i.series *= tmpl.numSeries()
	}

	// make stringers from the templates
	i.stringers = i.Templates.Init(i.series)

	// Set the time function, keeps track of 'time' of the points being created
	i.time = i.Timestamp.Time(s.StartDate, i.series, s.Precision)

	// Set a commune on the StressTest (guarded by the StressTest mutex)
	s.Lock()
	comCh := s.SetCommune(i.Name)
	s.Unlock()

	// Set the tracer
	i.Tracer = stressClient.NewTracer(i.tags())

	return comCh
}
// Run satisfies the Statement Interface. It renders Timestamp.Count
// points from the template, groups them into batches of s.BatchSize,
// ships each batch as a Write package, and blocks until every batch is
// acknowledged through the Tracer.
func (i *InsertStatement) Run(s *stressClient.StressTest) {
	// Set variables on the InsertStatement and make the comCh
	comCh := i.SetVars(s)

	// TODO: Refactor to eliminate the ctr
	// Start the counter
	ctr := 0

	// Create the first bytes buffer
	buf := bytes.NewBuffer([]byte{})

	runtime := time.Now()

	for k := 0; k < i.Timestamp.Count; k++ {

		// Increment the counter. ctr == k + 1?
		ctr++

		// Make the point from the template string and the stringers
		point := fmt.Sprintf(i.TemplateString, i.stringers.Eval(i.time)...)

		// Add the string to the buffer
		buf.WriteString(point)
		// Add a newline char to separate the points
		buf.WriteString("\n")

		// If len(batch) == batchSize then send it
		if ctr%s.BatchSize == 0 && ctr != 0 {
			b := buf.Bytes()
			// Trimming the trailing newline character
			b = b[0 : len(b)-1]

			// Create the package
			p := stressClient.NewPackage(stressClient.Write, b, i.StatementID, i.Tracer)

			// Use Tracer to wait for all operations to finish
			i.Tracer.Add(1)

			// Send the package
			s.SendPackage(p)

			// Reset the bytes Buffer
			temp := bytes.NewBuffer([]byte{})
			buf = temp
		}

		// TODO: Racy
		// Has to do with InsertStatement and QueryStatement communication.
		// Best-effort, non-blocking publish of the latest point so query
		// statements can template queries from live data.
		if len(comCh) < cap(comCh) {
			select {
			case comCh <- point:
				break
			default:
				break
			}
		}
	}

	// If There are additional points remaining in the buffer send them before exiting
	if buf.Len() != 0 {
		b := buf.Bytes()
		// Trimming the trailing newline character
		b = b[0 : len(b)-1]

		// Create the package
		p := stressClient.NewPackage(stressClient.Write, b, i.StatementID, i.Tracer)

		// Use Tracer to wait for all operations to finish
		i.Tracer.Add(1)

		// Send the package
		s.SendPackage(p)
	}

	// Wait for all tracers to decrement
	i.Tracer.Wait()

	// Stop the timer
	i.runtime = time.Since(runtime)
}
// Report satisfies the Statement Interface. It pulls the recorded
// write results for this statement and renders the summary string.
// Exits the process if no results were recorded.
func (i *InsertStatement) Report(s *stressClient.StressTest) string {
	// Pull data via StressTest client
	allData := s.GetStatementResults(i.StatementID, "write")

	// len() also covers nil; the previous `allData == nil` check let an
	// empty non-nil slice fall through to a panic on allData[0] below.
	// This mirrors the guard in QueryStatement.Report.
	if len(allData) == 0 || allData[0].Series == nil {
		log.Fatalf("No data returned for write report\n Statement Name: %v\n Statement ID: %v\n", i.Name, i.StatementID)
	}

	ir := &insertReport{
		name:    i.Name,
		columns: allData[0].Series[0].Columns,
		values:  allData[0].Series[0].Values,
	}

	responseTimes := responseTimes(ir.columns, ir.values)

	ir.percentile = percentile(responseTimes)
	ir.avgResponseTime = avgDuration(responseTimes)
	ir.stdDevResponseTime = stddevDuration(responseTimes)
	ir.pointsPerSecond = int(float64(i.Timestamp.Count) / i.runtime.Seconds())
	ir.numRetries = countRetries(ir.columns, ir.values)
	ir.successfulWrites = countSuccesses(ir.columns, ir.values)
	ir.avgRequestBytes = numberBytes(ir.columns, ir.values)

	return ir.String()
}
// numFields counts the fields in the line-protocol template by taking
// the second space-separated segment and splitting it on commas.
// Assumes the template has the shape "<measurement+tags> <fields>
// <timestamp>" — a template without a field segment would panic on
// pt[1].
func (i *InsertStatement) numFields() string {
	pt := strings.Split(i.TemplateString, " ")
	fields := strings.Split(pt[1], ",")
	return fmtInt(len(fields))
}

// fmtInt renders an int in base 10.
func fmtInt(i int) string {
	return strconv.FormatInt(int64(i), 10)
}

View File

@ -1,50 +0,0 @@
package statement
import (
"strings"
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// TestInsertSetID verifies that SetID overwrites the StatementID.
func TestInsertSetID(t *testing.T) {
	e := newTestInsert()
	newID := "oaijnifo"
	e.SetID(newID)
	if e.StatementID != newID {
		t.Errorf("Expected: %v\nGot: %v\n", newID, e.StatementID)
	}
}

// TestInsertRun verifies that every package Run emits is either a full
// batch or the final partial batch.
func TestInsertRun(t *testing.T) {
	i := newTestInsert()
	s, packageCh, _ := stressClient.NewTestStressTest()

	// Listen to the other side of the directiveCh
	go func() {
		for pkg := range packageCh {
			countPoints := i.Timestamp.Count
			batchSize := s.BatchSize
			// Number of newline-separated points in this package's body.
			got := len(strings.Split(string(pkg.Body), "\n"))
			switch got {
			case countPoints % batchSize: // final partial batch
			case batchSize: // full batch
			default:
				t.Errorf("countPoints: %v\nbatchSize: %v\ngot: %v\n", countPoints, batchSize, got)
			}
			// Ack so Run's Tracer.Wait() can return.
			pkg.Tracer.Done()
		}
	}()

	i.Run(s)
}

// newTestInsert returns a minimal InsertStatement fixture with one tag
// template and a three-slot line-protocol template string.
func newTestInsert() *InsertStatement {
	return &InsertStatement{
		TestID:         "foo_test",
		StatementID:    "foo_ID",
		Name:           "foo_name",
		TemplateString: "cpu,%v %v %v",
		Timestamp:      newTestTimestamp(),
		Templates:      newTestTemplates(),
		TagCount:       1,
	}
}

View File

@ -1,161 +0,0 @@
package statement
import (
"fmt"
"log"
"time"
"github.com/influxdata/influxdb/models"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// QueryStatement is a Statement Implementation to run queries on the target InfluxDB instance
type QueryStatement struct {
	StatementID string
	Name        string

	// TemplateString is a query template that can be filled in by Args
	TemplateString string
	Args           []string

	// Number of queries to run
	Count int

	// Tracer for tracking returns
	Tracer *stressClient.Tracer

	// track time for all queries
	runtime time.Duration
}

// tags returns the tags added to the recording points. Query
// statements currently record no extra tags.
func (i *QueryStatement) tags() map[string]string {
	tags := make(map[string]string)
	return tags
}

// SetID satisfies the Statement Interface.
func (i *QueryStatement) SetID(s string) {
	i.StatementID = s
}
// Run satisfies the Statement Interface. It issues Count queries:
// plain queries (no Args) are sent as-is; templated queries are first
// filled with values cherry-picked from this statement's commune point.
func (i *QueryStatement) Run(s *stressClient.StressTest) {
	i.Tracer = stressClient.NewTracer(i.tags())

	vals := make(map[string]interface{})
	var point models.Point

	runtime := time.Now()

	for j := 0; j < i.Count; j++ {

		// If the query is a simple query, send it.
		if len(i.Args) == 0 {
			b := []byte(i.TemplateString)

			// Make the package
			p := stressClient.NewPackage(stressClient.Query, b, i.StatementID, i.Tracer)

			// Increment the tracer
			i.Tracer.Add(1)

			// Send the package
			s.SendPackage(p)
		} else {
			// Otherwise cherry pick field values from the commune?

			// TODO: Currently the program lock up here if s.GetPoint
			// cannot return a value, which can happen.
			// See insert.go
			s.Lock()
			point = s.GetPoint(i.Name, s.Precision)
			s.Unlock()

			setMapValues(vals, point)

			// Set the template string with args from the commune
			b := []byte(fmt.Sprintf(i.TemplateString, setArgs(vals, i.Args)...))

			// Make the package
			p := stressClient.NewPackage(stressClient.Query, b, i.StatementID, i.Tracer)

			// Increment the tracer
			i.Tracer.Add(1)

			// Send the package
			s.SendPackage(p)
		}
	}

	// Wait for all operations to finish
	i.Tracer.Wait()

	// Stop time timer
	i.runtime = time.Since(runtime)
}
// Report satisfies the Statement Interface. It pulls the recorded
// query results for this statement and renders the summary string.
// Exits the process if no results were recorded.
func (i *QueryStatement) Report(s *stressClient.StressTest) string {
	// Pull data via StressTest client
	allData := s.GetStatementResults(i.StatementID, "query")

	if len(allData) == 0 || allData[0].Series == nil {
		log.Fatalf("No data returned for query report\n Statement Name: %v\n Statement ID: %v\n", i.Name, i.StatementID)
	}

	qr := &queryReport{
		name:    i.Name,
		columns: allData[0].Series[0].Columns,
		values:  allData[0].Series[0].Values,
	}

	responseTimes := responseTimes(qr.columns, qr.values)

	qr.percentile = percentile(responseTimes)
	qr.avgResponseTime = avgDuration(responseTimes)
	qr.stdDevResponseTime = stddevDuration(responseTimes)
	qr.successfulReads = countSuccesses(qr.columns, qr.values)
	qr.responseBytes = numberBytes(qr.columns, qr.values)

	return qr.String()
}
// getRandomTagPair returns one arbitrary "key='value'" pair from m, or
// "" when there are no tags. "Random" relies on Go's unspecified map
// iteration order.
// NOTE(review): ranging with (k, v) assumes models.Tags is a map type
// in this influxdb version — confirm against the models package.
func getRandomTagPair(m models.Tags) string {
	for k, v := range m {
		return fmt.Sprintf("%v='%v'", k, v)
	}
	return ""
}
// getRandomFieldKey returns one arbitrary key from m (Go map iteration
// order is unspecified), or "" when the map is empty.
func getRandomFieldKey(m map[string]interface{}) string {
	for key := range m {
		return key
	}
	return ""
}
// setMapValues fills m with the template substitution values drawn
// from a sample point: %f = one field key, %m = the measurement name,
// %t = one "key='value'" tag pair, %a = the point's UnixNano timestamp.
// Panics if the point's fields cannot be decoded.
func setMapValues(m map[string]interface{}, p models.Point) {
	fields, err := p.Fields()
	if err != nil {
		panic(err)
	}
	m["%f"] = getRandomFieldKey(fields)
	m["%m"] = string(p.Name())
	m["%t"] = getRandomTagPair(p.Tags())
	m["%a"] = p.UnixNano()
}
// setArgs resolves each template arg name to its value in m, preserving
// the order of args. Names missing from m yield nil entries.
func setArgs(m map[string]interface{}, args []string) []interface{} {
	resolved := make([]interface{}, len(args))
	for idx, name := range args {
		resolved[idx] = m[name]
	}
	return resolved
}

View File

@ -1,42 +0,0 @@
package statement
import (
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
func TestQuerySetID(t *testing.T) {
e := newTestQuery()
newID := "oaijnifo"
e.SetID(newID)
if e.StatementID != newID {
t.Errorf("Expected: %v\nGot: %v\n", newID, e.StatementID)
}
}
func TestQueryRun(t *testing.T) {
i := newTestQuery()
s, packageCh, _ := stressClient.NewTestStressTest()
// Listen to the other side of the directiveCh
go func() {
for pkg := range packageCh {
if i.TemplateString != string(pkg.Body) {
t.Fail()
}
pkg.Tracer.Done()
}
}()
i.Run(s)
}
func newTestQuery() *QueryStatement {
return &QueryStatement{
StatementID: "foo_ID",
Name: "foo_name",
TemplateString: "SELECT count(value) FROM cpu",
Args: []string{},
Count: 5,
Tracer: stressClient.NewTracer(map[string]string{}),
}
}

View File

@ -1,237 +0,0 @@
package statement
import (
"encoding/json"
"fmt"
"log"
"math"
"sort"
"time"
influx "github.com/influxdata/influxdb/client/v2"
)
// TODO: Refactor this file to utilize a common interface
// This will make adding new reports easier in the future

// insertReport collects performance numbers for insert statements.
type insertReport struct {
	name               string
	numRetries         int
	pointsPerSecond    int
	successfulWrites   int
	avgRequestBytes    int
	avgResponseTime    time.Duration
	stdDevResponseTime time.Duration
	percentile         time.Duration
	columns            []string
	values             [][]interface{}
}

// String renders the human-readable version of the report that is
// output to STDOUT.
func (ir *insertReport) String() string {
	format := `Write Statement: %v
Points/Sec: %v
Resp Time Average: %v
Resp Time Standard Deviation: %v
95th Percentile Write Response: %v
Average Request Bytes: %v
Successful Write Reqs: %v
Retries: %v`

	return fmt.Sprintf(
		format,
		ir.name,
		ir.pointsPerSecond,
		ir.avgResponseTime,
		ir.stdDevResponseTime,
		ir.percentile,
		ir.avgRequestBytes,
		ir.successfulWrites,
		ir.numRetries,
	)
}
// Point returns a point representation of the report to be written to
// the ResultsDB.
// NOTE(review): this is still a placeholder — the measurement, tags,
// and fields are fixed dummy values and carry none of the report data.
func (ir *insertReport) Point() *influx.Point {
	measurement := "testDefault"
	tags := map[string]string{}
	fields := map[string]interface{}{"field": "blank"}
	point, err := influx.NewPoint(measurement, tags, fields, time.Now())
	if err != nil {
		log.Fatalf("Error creating insertReport point\n measurement: %v\n tags: %v\n fields: %v\n error: %v\n", measurement, tags, fields, err)
	}
	return point
}
// queryReport aggregates performance numbers for query statements.
// columns/values hold the raw query-result rows the summary statistics
// were derived from.
type queryReport struct {
	name                string
	successfulReads     int
	responseBytes       int
	stddevResponseBytes int
	avgResponseTime     time.Duration
	stdDevResponseTime  time.Duration
	percentile          time.Duration
	columns             []string
	values              [][]interface{}
}
// String returns the version of the report that is output to STDOUT.
func (qr *queryReport) String() string {
	tmplString := `Query Statement: %v
Resp Time Average: %v
Resp Time Standard Deviation: %v
95th Percentile Read Response: %v
Query Resp Bytes Average: %v bytes
Successful Queries: %v`
	return fmt.Sprintf(tmplString,
		qr.name,
		qr.avgResponseTime,
		qr.stdDevResponseTime,
		qr.percentile,
		qr.responseBytes,
		qr.successfulReads)
}
// Point returns a point representation of the report to be written to
// the ResultsDB.
// NOTE(review): same hard-coded stub as insertReport.Point — confirm
// whether real report data should be emitted here.
func (qr *queryReport) Point() *influx.Point {
	measurement := "testDefault"
	tags := map[string]string{}
	fields := map[string]interface{}{"field": "blank"}
	point, err := influx.NewPoint(measurement, tags, fields, time.Now())
	if err != nil {
		log.Fatalf("Error creating queryReport point\n measurement: %v\n tags: %v\n fields: %v\n error: %v\n", measurement, tags, fields, err)
	}
	return point
}
// influxQlReport records the outcome of a single ad-hoc InfluxQL
// statement: the statement text, how long it took, and whether it
// succeeded.
type influxQlReport struct {
	statement    string
	responseTime time.Duration
	success      bool
	columns      []string
	values       [][]interface{}
}

// String returns the version of the report that is output to STDOUT,
// e.g. `[√] 'CREATE DATABASE foo' -> 12ms`.
func (iqlr *influxQlReport) String() string {
	// Fancy format success: a check mark for success, an X for failure.
	// (An if/else replaces the previous switch-on-bool, which is
	// non-idiomatic and looked non-exhaustive to readers.)
	success := "[X]"
	if iqlr.success {
		success = "[√]"
	}
	return fmt.Sprintf("%v '%v' -> %v", success, iqlr.statement, iqlr.responseTime)
}
// Point returns a point representation of the report to be written to
// the ResultsDB.
// NOTE(review): same hard-coded stub as the other report Points.
func (iqlr *influxQlReport) Point() *influx.Point {
	measurement := "testDefault"
	tags := map[string]string{}
	fields := map[string]interface{}{"field": "blank"}
	point, err := influx.NewPoint(measurement, tags, fields, time.Now())
	if err != nil {
		log.Fatalf("Error creating influxQL point\n measurement: %v\n tags: %v\n fields: %v\n error: %v\n", measurement, tags, fields, err)
	}
	return point
}
// Given a field or tag name this function returns the index where the values are found
func getColumnIndex(col string, columns []string) int {
index := -1
for i, column := range columns {
if column == col {
index = i
}
}
return index
}
// Given a full set of results pulls the average num_bytes
func numberBytes(columns []string, values [][]interface{}) int {
out := 0
index := getColumnIndex("num_bytes", columns)
for _, val := range values {
reqBytes, err := val[index].(json.Number).Int64()
if err != nil {
log.Fatalf("Error coercing json.Number to Int64\n json.Number:%v\n error: %v\n", val[index], err)
}
out += int(reqBytes)
}
return out / len(values)
}
// Counts the number of 200(query) or 204(write) responses and returns them
func countSuccesses(columns []string, values [][]interface{}) (out int) {
index := getColumnIndex("status_code", columns)
for _, val := range values {
status, err := val[index].(json.Number).Int64()
if err != nil {
log.Fatalf("Error coercing json.Number to Int64\n json.Number:%v\n error: %v\n", val[index], err)
}
if status == 204 || status == 200 {
out++
}
}
return out
}
// Counts number of 500 status codes
func countRetries(columns []string, values [][]interface{}) (out int) {
index := getColumnIndex("status_code", columns)
for _, val := range values {
status, err := val[index].(json.Number).Int64()
if err != nil {
log.Fatalf("Error coercing json.Number to Int64\n json.Number:%v\n error: %v\n", val[index], err)
}
if status == 500 {
out++
}
}
return out
}
// responseTimes pulls the response_time_ns values out of a result set
// and wraps each in a ResponseTime for reporting.
// NOTE(review): a missing column yields index -1 and panics on
// val[index] — confirm callers always query this column.
func responseTimes(columns []string, values [][]interface{}) (rs ResponseTimes) {
	rs = make([]ResponseTime, 0)
	index := getColumnIndex("response_time_ns", columns)
	for _, val := range values {
		respTime, err := val[index].(json.Number).Int64()
		if err != nil {
			log.Fatalf("Error coercing json.Number to Int64\n json.Number:%v\n error: %v\n", val[index], err)
		}
		rs = append(rs, NewResponseTime(int(respTime)))
	}
	return rs
}
// percentile returns the 95th percentile response time.
// NOTE: sorts rs in place (mutating the caller's slice order) and
// panics on an empty slice.
func percentile(rs ResponseTimes) time.Duration {
	sort.Sort(rs)
	// index len*19/20 is always < len, so this is in range for len >= 1
	return time.Duration(rs[(len(rs) * 19 / 20)].Value)
}
// avgDuration returns the mean of the response-time samples using
// integer (truncating) duration division. Returns 0 for an empty slice
// instead of dividing by zero as the previous implementation did.
func avgDuration(rs ResponseTimes) (out time.Duration) {
	if len(rs) == 0 {
		return 0
	}
	for _, t := range rs {
		out += time.Duration(t.Value)
	}
	return out / time.Duration(len(rs))
}
// stddevDuration returns the population standard deviation of the
// sample of response times around avgDuration(rs), truncated to a
// whole Duration.
// NOTE(review): divides by len(rs); an empty slice produces NaN via the
// float path — confirm callers never pass an empty sample.
func stddevDuration(rs ResponseTimes) (out time.Duration) {
	avg := avgDuration(rs)
	for _, t := range rs {
		// accumulate squared deviations (Duration arithmetic on int64)
		out += (avg - time.Duration(t.Value)) * (avg - time.Duration(t.Value))
	}
	return time.Duration(int64(math.Sqrt(float64(out) / float64(len(rs)))))
}

View File

@ -1,210 +0,0 @@
package statement
import (
"encoding/json"
"fmt"
"strings"
"testing"
"time"
)
// TestInsertReportString verifies insertReport.String renders the shared
// template with fields in the expected order.
func TestInsertReportString(t *testing.T) {
	ir := newTestInsertReport()
	tmplString := `Write Statement: %v
Points/Sec: %v
Resp Time Average: %v
Resp Time Standard Deviation: %v
95th Percentile Write Response: %v
Average Request Bytes: %v
Successful Write Reqs: %v
Retries: %v`
	expected := fmt.Sprintf(tmplString,
		ir.name,
		ir.pointsPerSecond,
		ir.avgResponseTime,
		ir.stdDevResponseTime,
		ir.percentile,
		ir.avgRequestBytes,
		ir.successfulWrites,
		ir.numRetries)
	got := ir.String()
	if expected != got {
		t.Fail()
	}
}

// TestInsertReportPoint checks the reported point's measurement name.
func TestInsertReportPoint(t *testing.T) {
	ir := newTestInsertReport()
	expected := "testDefault"
	got := strings.Split(ir.Point().String(), " ")[0]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestQueryReportString verifies queryReport.String renders the shared
// template with fields in the expected order.
func TestQueryReportString(t *testing.T) {
	qr := newTestQueryReport()
	tmplString := `Query Statement: %v
Resp Time Average: %v
Resp Time Standard Deviation: %v
95th Percentile Read Response: %v
Query Resp Bytes Average: %v bytes
Successful Queries: %v`
	expected := fmt.Sprintf(tmplString,
		qr.name,
		qr.avgResponseTime,
		qr.stdDevResponseTime,
		qr.percentile,
		qr.responseBytes,
		qr.successfulReads)
	got := qr.String()
	if expected != got {
		t.Fail()
	}
}

// TestQueryReportPoint checks the reported point's measurement name.
func TestQueryReportPoint(t *testing.T) {
	qr := newTestQueryReport()
	expected := "testDefault"
	got := strings.Split(qr.Point().String(), " ")[0]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestInfluxQLReportString expects the failure marker since the fixture
// has success == false.
func TestInfluxQLReportString(t *testing.T) {
	iqlr := newTestInfluxQLReport()
	expected := fmt.Sprintf("[X] '%v' -> %v", iqlr.statement, iqlr.responseTime)
	got := iqlr.String()
	if expected != got {
		t.Fail()
	}
}

// TestInfluxQLReportPoint checks the reported point's measurement name.
func TestInfluxQLReportPoint(t *testing.T) {
	iqlr := newTestInfluxQLReport()
	expected := "testDefault"
	got := strings.Split(iqlr.Point().String(), " ")[0]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// newTestInsertReport returns a populated insertReport fixture.
func newTestInsertReport() *insertReport {
	return &insertReport{
		name:               "foo_name",
		numRetries:         0,
		pointsPerSecond:    500000,
		successfulWrites:   20000,
		avgRequestBytes:    18932,
		avgResponseTime:    time.Duration(int64(20000)),
		stdDevResponseTime: time.Duration(int64(20000)),
		percentile:         time.Duration(int64(20000)),
	}
}

// newTestQueryReport returns a populated queryReport fixture.
func newTestQueryReport() *queryReport {
	return &queryReport{
		name:                "foo_name",
		successfulReads:     2000,
		responseBytes:       39049,
		stddevResponseBytes: 9091284,
		avgResponseTime:     139082,
		stdDevResponseTime:  29487,
		percentile:          8273491,
	}
}

// newTestInfluxQLReport returns a failed (success == false) fixture.
func newTestInfluxQLReport() *influxQlReport {
	return &influxQlReport{
		statement:    "foo_name",
		responseTime: time.Duration(int64(20000)),
		success:      false,
	}
}

// TestGetColumnIndex covers the found case.
func TestGetColumnIndex(t *testing.T) {
	col := "thing"
	columns := []string{"thing"}
	expected := 0
	got := getColumnIndex(col, columns)
	if expected != got {
		t.Fail()
	}
}

// TestNumberBytes averages a single-row num_bytes column.
func TestNumberBytes(t *testing.T) {
	columns := []string{"num_bytes"}
	values := [][]interface{}{[]interface{}{json.Number("1")}}
	expected := 1
	got := numberBytes(columns, values)
	if expected != got {
		t.Fail()
	}
}

// TestCountSuccesses counts a single 200 status row.
func TestCountSuccesses(t *testing.T) {
	columns := []string{"status_code"}
	values := [][]interface{}{[]interface{}{json.Number("200")}}
	expected := 1
	got := countSuccesses(columns, values)
	if expected != got {
		t.Fail()
	}
}

// TestCountRetries counts a single 500 status row.
func TestCountRetries(t *testing.T) {
	columns := []string{"status_code"}
	values := [][]interface{}{[]interface{}{json.Number("500")}}
	expected := 1
	got := countRetries(columns, values)
	if expected != got {
		t.Fail()
	}
}

// TestResponseTimes converts a response_time_ns row into a ResponseTime.
func TestResponseTimes(t *testing.T) {
	columns := []string{"response_time_ns"}
	values := [][]interface{}{[]interface{}{json.Number("380")}}
	expected := ResponseTimes([]ResponseTime{NewResponseTime(380)})
	got := responseTimes(columns, values)
	if expected[0].Value != got[0].Value {
		t.Fail()
	}
}

// TestPercentile checks the 95th percentile over 22 samples
// (sorted index 22*19/20 = 20 -> value 21).
func TestPercentile(t *testing.T) {
	rs := createTestResponseTimes()
	expected := time.Duration(21)
	got := percentile(rs)
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestAvgDuration checks the truncated mean of 1..22 (253/22 -> 11).
func TestAvgDuration(t *testing.T) {
	rs := createTestResponseTimes()
	expected := time.Duration(11)
	got := avgDuration(rs)
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestStddevDuration checks the truncated standard deviation of 1..22.
func TestStddevDuration(t *testing.T) {
	rs := createTestResponseTimes()
	expected := time.Duration(6)
	got := stddevDuration(rs)
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// createTestResponseTimes returns the values 1..22 in shuffled order.
func createTestResponseTimes() ResponseTimes {
	rstms := []int{1, 2, 3, 4, 5, 6, 7, 13, 14, 15, 16, 17, 18, 19, 8, 9, 10, 11, 12, 20, 21, 22}
	rs := []ResponseTime{}
	for _, rst := range rstms {
		rs = append(rs, NewResponseTime(rst))
	}
	return rs
}

View File

@ -1,40 +0,0 @@
package statement
import (
"time"
)
// ResponseTime pairs a measured response value with the moment it was
// recorded.
type ResponseTime struct {
	Value int
	Time  time.Time
}

// NewResponseTime builds a ResponseTime for value v, stamped with
// time.Now().
func NewResponseTime(v int) ResponseTime {
	return ResponseTime{Value: v, Time: time.Now()}
}

// ResponseTimes is a sortable collection of ResponseTime values.
type ResponseTimes []ResponseTime

// Len implements sort.Interface.
func (rs ResponseTimes) Len() int { return len(rs) }

// Less implements sort.Interface, ordering by Value ascending.
func (rs ResponseTimes) Less(i, j int) bool { return rs[i].Value < rs[j].Value }

// Swap implements sort.Interface.
func (rs ResponseTimes) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }

View File

@ -1,45 +0,0 @@
package statement
import (
"testing"
)
// TestNewResponseTime verifies the value is stored as given.
func TestNewResponseTime(t *testing.T) {
	value := 100000
	rs := NewResponseTime(value)
	if rs.Value != value {
		t.Errorf("expected: %v\ngot: %v\n", value, rs.Value)
	}
}

// newResponseTimes returns a two-element fixture in descending order.
func newResponseTimes() ResponseTimes {
	return []ResponseTime{
		NewResponseTime(100),
		NewResponseTime(10),
	}
}

// TestResponseTimeLen checks sort.Interface Len.
func TestResponseTimeLen(t *testing.T) {
	rs := newResponseTimes()
	if rs.Len() != 2 {
		t.Fail()
	}
}

// TestResponseTimeLess checks Less orders by Value.
func TestResponseTimeLess(t *testing.T) {
	rs := newResponseTimes()
	less := rs.Less(1, 0)
	if !less {
		t.Fail()
	}
}

// TestResponseTimeSwap checks Swap exchanges the two elements.
func TestResponseTimeSwap(t *testing.T) {
	rs := newResponseTimes()
	rs0 := rs[0]
	rs1 := rs[1]
	rs.Swap(0, 1)
	if rs0 != rs[1] || rs1 != rs[0] {
		t.Fail()
	}
}

View File

@ -1,59 +0,0 @@
package statement
import (
"fmt"
"strings"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// SetStatement sets a state variable for the test. A few properties
// (precision, startdate, batchsize) live on the StressTest itself; all
// others are forwarded to the stressClient backend as Directives.
type SetStatement struct {
	Var   string
	Value string

	StatementID string

	Tracer *stressClient.Tracer
}

// SetID satisfies the Statement interface.
func (i *SetStatement) SetID(s string) {
	i.StatementID = s
}

// Run satisfies the Statement interface. It routes the property either
// to the StressTest, the backend, or both, then blocks until the
// directive has been consumed (Tracer.Wait).
func (i *SetStatement) Run(s *stressClient.StressTest) {
	i.Tracer = stressClient.NewTracer(make(map[string]string))
	d := stressClient.NewDirective(strings.ToLower(i.Var), strings.ToLower(i.Value), i.Tracer)
	switch d.Property {
	// Needs to be set on both StressTest and stressClient.
	// Sets the write precision for points generated.
	case "precision":
		s.Precision = d.Value
		i.Tracer.Add(1)
		s.SendDirective(d)
	// Lives on StressTest only.
	// Sets the date for the first point entered into the database.
	case "startdate":
		s.Lock()
		s.StartDate = d.Value
		s.Unlock()
	// Lives on StressTest only.
	// Sets the BatchSize for writes.
	case "batchsize":
		s.Lock()
		s.BatchSize = parseInt(d.Value)
		s.Unlock()
	// All other variables live on stressClient.
	default:
		i.Tracer.Add(1)
		s.SendDirective(d)
	}
	// Block until any forwarded directive has been handled downstream.
	i.Tracer.Wait()
}

// Report satisfies the Statement interface.
func (i *SetStatement) Report(s *stressClient.StressTest) string {
	return fmt.Sprintf("SET %v = '%v'", i.Var, i.Value)
}

View File

@ -1,92 +0,0 @@
package statement
import (
"fmt"
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// TestSetSetID verifies SetID overwrites the statement ID.
func TestSetSetID(t *testing.T) {
	e := newTestSet("database", "foo")
	newID := "oaijnifo"
	e.SetID(newID)
	if e.StatementID != newID {
		t.Errorf("Expected: %v\nGot: %v\n", newID, e.StatementID)
	}
}

// TestSetRun exercises Run for every supported property name.
func TestSetRun(t *testing.T) {
	properties := []string{
		"precision",
		"startdate",
		"batchsize",
		"resultsaddress",
		"testname",
		"addresses",
		"writeinterval",
		"queryinterval",
		"database",
		"writeconcurrency",
		"queryconcurrency",
	}
	for _, prop := range properties {
		testSetRunUtl(t, prop, "1")
	}
}

// testSetRunUtl runs one SetStatement against a test StressTest,
// checking both the directive forwarded to the backend and any state
// stored on the StressTest itself.
func testSetRunUtl(t *testing.T, property string, value string) {
	i := newTestSet(property, value)
	s, _, directiveCh := stressClient.NewTestStressTest()
	// Listen to the other side of the directiveCh
	go func() {
		for d := range directiveCh {
			if i.Var != d.Property {
				t.Errorf("wrong property sent to stressClient\n expected: %v\n got: %v\n", i.Var, d.Property)
			}
			if i.Value != d.Value {
				t.Errorf("wrong value sent to stressClient\n expected: %v\n got: %v\n", i.Value, d.Value)
			}
			d.Tracer.Done()
		}
	}()
	// Run the statement
	i.Run(s)
	// Check the result
	switch i.Var {
	case "precision":
		if i.Value != s.Precision {
			t.Errorf("Failed to set %v\n", i.Var)
		}
	case "startdate":
		if i.Value != s.StartDate {
			t.Errorf("Failed to set %v\n", i.Var)
		}
	case "batchsize":
		if parseInt(i.Value) != s.BatchSize {
			t.Errorf("Failed to set %v\n", i.Var)
		}
	// TODO: Actually test this
	case "resultsaddress":
	default:
	}
}

// TestSetReport checks the SET report format.
func TestSetReport(t *testing.T) {
	set := newTestSet("this", "that")
	s, _, _ := stressClient.NewTestStressTest()
	rpt := set.Report(s)
	expected := fmt.Sprintf("SET %v = '%v'", set.Var, set.Value)
	if rpt != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, rpt)
	}
}

// newTestSet builds a SetStatement fixture.
func newTestSet(toSet, value string) *SetStatement {
	return &SetStatement{
		Var:         toSet,
		Value:       value,
		Tracer:      stressClient.NewTracer(make(map[string]string)),
		StatementID: "fooID",
	}
}

View File

@ -1,32 +0,0 @@
package statement
import (
"log"
"strconv"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// Statement is the common interface to shape the testing environment
// and prepare database requests. The parser turns the 'statements' in
// the config file into Statements.
type Statement interface {
	// Run executes the statement against the given StressTest.
	Run(s *stressClient.StressTest)
	// Report summarizes the statement's outcome for display.
	Report(s *stressClient.StressTest) string
	// SetID assigns the statement's unique identifier.
	SetID(s string)
}
// parseInt converts a base-10 string to an int, terminating the process
// via log.Fatalf when the string is not a valid integer.
func parseInt(s string) int {
	parsed, err := strconv.ParseInt(s, 10, 64)
	if err == nil {
		return int(parsed)
	}
	log.Fatalf("Error parsing integer:\n String: %v\n Error: %v\n", s, err)
	return 0 // unreachable: log.Fatalf exits the process
}
// parseFloat converts a string to a float64 and truncates it toward
// zero to an int (e.g. "2.9" -> 2). Terminates the process via
// log.Fatalf when the string is not a valid number.
func parseFloat(s string) int {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		// Fixed: the message previously said "integer" for a float parse.
		log.Fatalf("Error parsing float:\n String: %v\n Error: %v\n", s, err)
	}
	return int(f)
}

View File

@ -1,47 +0,0 @@
package statement
// A Template contains all information to fill in templated variables in
// insert and query statements: either a fixed list of tag values, or a
// generator Function (used when Tags is empty).
type Template struct {
	Tags     []string
	Function *Function
}

// Templates are a collection of Template.
type Templates []*Template

// Init makes Stringers out of the Templates for quick point creation.
// Tag-backed templates cycle through their tag list; function-backed
// templates delegate to Function.NewStringer.
func (t Templates) Init(seriesCount int) Stringers {
	arr := make([]Stringer, len(t))
	for i, tmp := range t {
		if len(tmp.Tags) == 0 {
			arr[i] = tmp.Function.NewStringer(seriesCount)
			continue
		}
		arr[i] = tmp.NewTagFunc()
	}
	return arr
}

// numSeries calculates the number of series implied by a template.
func (t *Template) numSeries() int {
	// If !t.Tags then tag cardinality is t.Function.Count
	if len(t.Tags) == 0 {
		return t.Function.Count
	}
	// Else tag cardinality is len(t.Tags)
	return len(t.Tags)
}

// NewTagFunc returns a Stringer that loops through the given tags.
// The returned closure shares a mutable index and is therefore not
// safe for concurrent use.
func (t *Template) NewTagFunc() Stringer {
	if len(t.Tags) == 0 {
		return func() string { return "EMPTY TAGS" }
	}
	i := 0
	return func() string {
		s := t.Tags[i]
		i = (i + 1) % len(t.Tags)
		return s
	}
}

View File

@ -1,72 +0,0 @@
package statement
import (
"testing"
)
// TestNewTagFunc covers the tag-cycling closure and the EMPTY TAGS
// fallback for function-backed templates.
func TestNewTagFunc(t *testing.T) {
	wtags := newTestTagsTemplate()
	wfunc := newTestFunctionTemplate()
	expected := wtags.Tags[0]
	got := wtags.NewTagFunc()()
	if got != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = "EMPTY TAGS"
	got = wfunc.NewTagFunc()()
	if got != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestNumSeries checks cardinality for tag- and function-backed
// templates.
func TestNumSeries(t *testing.T) {
	wtags := newTestTagsTemplate()
	wfunc := newTestFunctionTemplate()
	expected := len(wtags.Tags)
	got := wtags.numSeries()
	if got != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = wfunc.Function.Count
	got = wfunc.numSeries()
	if got != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestTemplatesInit evaluates the initialized Stringers once and checks
// both the tag value and the generated integer literal.
func TestTemplatesInit(t *testing.T) {
	tmpls := newTestTemplates()
	s := tmpls.Init(5)
	vals := s.Eval(spoofTime)
	expected := tmpls[0].Tags[0]
	got := vals[0]
	if got != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = "0i"
	got = vals[1]
	if got != expected {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// newTestTemplates pairs one tag template with one function template.
func newTestTemplates() Templates {
	return []*Template{
		newTestTagsTemplate(),
		newTestFunctionTemplate(),
	}
}

// newTestTagsTemplate returns a template backed by two literal tags.
func newTestTagsTemplate() *Template {
	return &Template{
		Tags: []string{"thing", "other_thing"},
	}
}

// newTestFunctionTemplate returns a template backed by an
// incrementing-int generator function.
func newTestFunctionTemplate() *Template {
	return &Template{
		Function: newIntIncFunction(),
	}
}

View File

@ -1,51 +0,0 @@
package statement
import (
"log"
"time"
)
// Timestamp carries everything needed to generate timestamps for points
// created by InsertStatements. Jitter is declared but not consulted
// here; timestamps advance by exactly Duration per full pass over the
// series.
type Timestamp struct {
	Count    int
	Duration time.Duration
	Jitter   bool
}

// Time returns a generator producing the next timestamp needed by the
// InsertStatement. startDate is "now" or a YYYY-MM-DD date; series is
// the number of distinct series that share one timestamp before it
// advances; precision selects second ("s") or nanosecond resolution.
func (t *Timestamp) Time(startDate string, series int, precision string) func() int64 {
	var start time.Time
	var err error
	if startDate == "now" {
		start = time.Now()
	} else {
		start, err = time.Parse("2006-01-02", startDate)
	}
	if err != nil {
		log.Fatalf("Error parsing start time from StartDate\n string: %v\n error: %v\n", startDate, err)
	}
	return nextTime(start, t.Duration, series, precision)
}

// nextTime builds the closure behind Time: the same timestamp is handed
// out `series` times, then the clock advances by step.
func nextTime(ti time.Time, step time.Duration, series int, precision string) func() int64 {
	current := ti
	emitted := 0
	return func() int64 {
		emitted++
		if emitted > series {
			current = current.Add(step)
			emitted = 1
		}
		if precision == "s" {
			return current.Unix()
		}
		return current.UnixNano()
	}
}

View File

@ -1,31 +0,0 @@
package statement
import (
"testing"
"time"
)
// TestTimestampTime checks second-precision generation from a fixed
// date, and that a "now"-based nanosecond timestamp is not later than
// a time.Now() captured just before it was produced.
func TestTimestampTime(t *testing.T) {
	tstp := newTestTimestamp()
	function := tstp.Time("2016-01-01", 100, "s")
	expected := int64(1451606400)
	got := function()
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	function = tstp.Time("now", 100, "ns")
	expected = time.Now().UnixNano()
	got = function()
	if expected < got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// newTestTimestamp returns a Timestamp fixture with a 10s step.
func newTestTimestamp() *Timestamp {
	duration, _ := time.ParseDuration("10s")
	return &Timestamp{
		Count:    5001,
		Duration: duration,
		Jitter:   false,
	}
}

View File

@ -1,32 +0,0 @@
package statement
import (
"fmt"
"time"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// WaitStatement is a Statement implementation that blocks until all
// currently running GoStatements finish, preventing the test from
// returning too early.
type WaitStatement struct {
	StatementID string

	// runtime records how long the wait took, for Report.
	runtime time.Duration
}

// SetID satisfies the Statement interface.
func (w *WaitStatement) SetID(s string) {
	w.StatementID = s
}

// Run satisfies the Statement interface. It waits on the StressTest's
// WaitGroup and records the elapsed wall time.
func (w *WaitStatement) Run(s *stressClient.StressTest) {
	runtime := time.Now()
	s.Wait()
	w.runtime = time.Since(runtime)
}

// Report satisfies the Statement interface.
func (w *WaitStatement) Report(s *stressClient.StressTest) string {
	return fmt.Sprintf("WAIT -> %v", w.runtime)
}

View File

@ -1,41 +0,0 @@
package statement
import (
"strings"
"testing"
stressClient "github.com/influxdata/influxdb/stress/v2/stress_client"
)
// TestWaitSetID verifies SetID overwrites the statement ID.
func TestWaitSetID(t *testing.T) {
	e := newTestWait()
	newID := "oaijnifo"
	e.SetID(newID)
	if e.StatementID != newID {
		t.Errorf("Expected: %v\ngott: %v\n", newID, e.StatementID)
	}
}

// TestWaitRun runs the statement against a test StressTest.
// NOTE(review): the e == nil check is always false for a freshly built
// fixture, so this effectively only asserts Run returns without
// blocking.
func TestWaitRun(t *testing.T) {
	e := newTestWait()
	s, _, _ := stressClient.NewTestStressTest()
	e.Run(s)
	if e == nil {
		t.Fail()
	}
}

// TestWaitReport checks the report mentions WAIT.
func TestWaitReport(t *testing.T) {
	e := newTestWait()
	s, _, _ := stressClient.NewTestStressTest()
	rpt := e.Report(s)
	if !strings.Contains(rpt, "WAIT") {
		t.Fail()
	}
}

// newTestWait builds a WaitStatement fixture.
func newTestWait() *WaitStatement {
	return &WaitStatement{
		StatementID: "fooID",
	}
}

View File

@ -1,58 +0,0 @@
package stressClient
import (
"log"
"time"
"github.com/influxdata/influxdb/models"
)
// Communes are a method for passing points between InsertStatements
// and QueryStatements.
type commune struct {
	ch chan string
	// storedPoint is the last successfully parsed point; it is returned
	// again when an empty payload arrives.
	storedPoint models.Point
}

// newCommune creates a new commune with a buffered chan of length n.
func newCommune(n int) *commune {
	return &commune{ch: make(chan string, n)}
}

// point blocks on the commune's channel, parses the received line
// protocol with the given precision, and falls back to the previously
// stored point when the payload parses to no points.
func (c *commune) point(precision string) models.Point {
	pt := []byte(<-c.ch)
	p, err := models.ParsePointsWithPrecision(pt, time.Now().UTC(), precision)
	if err != nil {
		log.Fatalf("Error parsing point for commune\n point: %v\n error: %v\n", pt, err)
	}
	if len(p) == 0 {
		return c.storedPoint
	}
	c.storedPoint = p[0]
	return p[0]
}

// SetCommune creates a new commune on the StressTest under the given
// name and returns the send side of its channel.
func (st *StressTest) SetCommune(name string) chan<- string {
	com := newCommune(10)
	st.communes[name] = com
	return com.ch
}

// GetPoint is called by a QueryStatement and retrieves a point sent by
// the associated InsertStatement. Terminates the process if no point is
// available.
func (st *StressTest) GetPoint(name, precision string) models.Point {
	p := st.communes[name].point(precision)
	// Function needs to return a point. Panic if it doesn't
	if p == nil {
		log.Fatal("Commune not returning point")
	}
	return p
}

View File

@ -1,57 +0,0 @@
package stressClient
import (
"testing"
)
// TestCommunePoint pushes one line-protocol string through a commune,
// checks the parsed name/tag/field, then verifies that an empty payload
// returns the previously stored point.
func TestCommunePoint(t *testing.T) {
	comm := newCommune(5)
	pt := "write,tag=tagVal fooField=5 1460912595"
	comm.ch <- pt
	point := comm.point("s")
	if string(point.Name()) != "write" {
		t.Errorf("expected: write\ngot: %v", string(point.Name()))
	}
	if point.Tags().GetString("tag") != "tagVal" {
		t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag"))
	}
	fields, err := point.Fields()
	if err != nil {
		t.Fatal(err)
	}
	if int(fields["fooField"].(float64)) != 5 {
		t.Errorf("expected: 5\ngot: %v\n", fields["fooField"])
	}
	// Make sure commune returns the prev point
	comm.ch <- ""
	point = comm.point("s")
	if string(point.Name()) != "write" {
		t.Errorf("expected: write\ngot: %v", string(point.Name()))
	}
	if point.Tags().GetString("tag") != "tagVal" {
		t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag"))
	}
	if int(fields["fooField"].(float64)) != 5 {
		t.Errorf("expected: 5\ngot: %v\n", fields["fooField"])
	}
}

// TestSetCommune registers a commune on the StressTest and round-trips
// one point through it via GetPoint.
func TestSetCommune(t *testing.T) {
	sf, _, _ := NewTestStressTest()
	ch := sf.SetCommune("foo_name")
	ch <- "write,tag=tagVal fooField=5 1460912595"
	pt := sf.GetPoint("foo_name", "s")
	if string(pt.Name()) != "write" {
		t.Errorf("expected: write\ngot: %v", string(pt.Name()))
	}
	if pt.Tags().GetString("tag") != "tagVal" {
		t.Errorf("expected: tagVal\ngot: %v", pt.Tags().GetString("tag"))
	}
	fields, err := pt.Fields()
	if err != nil {
		t.Fatal(err)
	}
	if int(fields["fooField"].(float64)) != 5 {
		t.Errorf("expected: 5\ngot: %v\n", fields["fooField"])
	}
}

View File

@ -1,19 +0,0 @@
package stressClient
// Directive carries one state change from a SetStatement to the
// stressClient backend, together with the Tracer used to signal when
// the change has been applied.
type Directive struct {
	Property string
	Value    string
	Tracer   *Tracer
}

// NewDirective creates a Directive for the given property/value pair.
func NewDirective(property string, value string, tracer *Tracer) Directive {
	return Directive{
		Property: property,
		Value:    value,
		Tracer:   tracer,
	}
}

View File

@ -1,20 +0,0 @@
package stressClient
import (
"testing"
)
// TestNewDirective verifies property and value are stored as given.
func TestNewDirective(t *testing.T) {
	tr := NewTracer(map[string]string{})
	prop := "foo_prop"
	val := "foo_value"
	dir := NewDirective(prop, val, tr)
	got := dir.Property
	if prop != got {
		t.Errorf("expected: %v\ngot: %v\n", prop, got)
	}
	got = dir.Value
	if val != got {
		t.Errorf("expected: %v\ngot: %v\n", val, got)
	}
}

View File

@ -1,22 +0,0 @@
package stressClient
// Package carries one unit of work — a write or a query payload in
// Body, discriminated by T — from an InsertStatement, QueryStatement
// or InfluxQLStatement to the stressClient backend.
type Package struct {
	T           Type
	Body        []byte
	StatementID string
	Tracer      *Tracer
}

// NewPackage creates a new Package with the given payload.
func NewPackage(t Type, body []byte, statementID string, tracer *Tracer) Package {
	return Package{
		T:           t,
		Body:        body,
		StatementID: statementID,
		Tracer:      tracer,
	}
}

View File

@ -1,16 +0,0 @@
package stressClient
import (
"testing"
)
// TestNewPackage verifies the payload body is stored untouched.
func TestNewPackage(t *testing.T) {
	qry := []byte("SELECT * FROM foo")
	statementID := "foo_id"
	tr := NewTracer(map[string]string{})
	pkg := NewPackage(Query, qry, statementID, tr)
	got := string(pkg.Body)
	if string(qry) != got {
		t.Errorf("expected: %v\ngot: %v\n", qry, got)
	}
}

View File

@ -1,95 +0,0 @@
package stressClient
import (
"log"
"strconv"
"time"
influx "github.com/influxdata/influxdb/client/v2"
)
// reporting.go contains functions to emit tags and points from various
// parts of stressClient. These points are then written to the
// ("_%v", sf.TestName) database.

// tags returns the tag set stressClient attaches to every response
// point it emits.
func (sc *stressClient) tags(statementID string) map[string]string {
	tags := map[string]string{
		"number_targets": fmtInt(len(sc.addresses)),
		"precision":      sc.precision,
		"writers":        fmtInt(sc.wconc),
		"readers":        fmtInt(sc.qconc),
		"test_id":        sc.testID,
		"statement_id":   statementID,
		"write_interval": sc.wdelay,
		"query_interval": sc.qdelay,
	}
	return tags
}
// tags returns the tag set the StressTest adds to any response points.
func (st *StressTest) tags() map[string]string {
	tags := map[string]string{
		"precision":  st.Precision,
		"batch_size": fmtInt(st.BatchSize),
	}
	return tags
}
// writePoint makes a *client.Point reporting on a single write request.
// NOTE(review): the retries parameter is accepted but never used —
// confirm whether it should be emitted as a field.
func (sc *stressClient) writePoint(retries int, statementID string, statusCode int, responseTime time.Duration, addedTags map[string]string, writeBytes int) *influx.Point {
	tags := sumTags(sc.tags(statementID), addedTags)
	fields := map[string]interface{}{
		"status_code":      statusCode,
		"response_time_ns": responseTime.Nanoseconds(),
		"num_bytes":        writeBytes,
	}
	point, err := influx.NewPoint("write", tags, fields, time.Now())
	if err != nil {
		log.Fatalf("Error creating write results point\n error: %v\n", err)
	}
	return point
}
// queryPoint makes a *client.Point reporting on a single query request;
// num_bytes is the length of the response body.
func (sc *stressClient) queryPoint(statementID string, body []byte, statusCode int, responseTime time.Duration, addedTags map[string]string) *influx.Point {
	tags := sumTags(sc.tags(statementID), addedTags)
	fields := map[string]interface{}{
		"status_code":      statusCode,
		"num_bytes":        len(body),
		"response_time_ns": responseTime.Nanoseconds(),
	}
	point, err := influx.NewPoint("query", tags, fields, time.Now())
	if err != nil {
		log.Fatalf("Error creating query results point\n error: %v\n", err)
	}
	return point
}
// sumTags merges two tag maps into a fresh map; on a key collision the
// value from tags2 wins.
func sumTags(tags1, tags2 map[string]string) map[string]string {
	merged := make(map[string]string, len(tags1)+len(tags2))
	// Copy in order so tags2 overrides tags1 on duplicates.
	for _, src := range []map[string]string{tags1, tags2} {
		for k, v := range src {
			merged[k] = v
		}
	}
	return merged
}
// fmtInt renders i as a base-10 string.
func fmtInt(i int) string {
	return strconv.Itoa(i)
}

View File

@ -1,100 +0,0 @@
package stressClient
import (
"testing"
"time"
)
// TestNewStressClientTags checks the tag set emitted for stressClient
// response points.
func TestNewStressClientTags(t *testing.T) {
	pe, _, _ := newTestStressClient("localhost:8086")
	tags := pe.tags("foo_id")
	expected := fmtInt(len(pe.addresses))
	got := tags["number_targets"]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = pe.precision
	got = tags["precision"]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = pe.wdelay
	got = tags["write_interval"]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = "foo_id"
	got = tags["statement_id"]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestNewStressTestTags checks the tag set emitted for StressTest
// response points.
func TestNewStressTestTags(t *testing.T) {
	sf, _, _ := NewTestStressTest()
	tags := sf.tags()
	expected := sf.Precision
	got := tags["precision"]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
	expected = fmtInt(sf.BatchSize)
	got = tags["batch_size"]
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestWritePoint checks measurement name, tags and fields of a write
// report point.
func TestWritePoint(t *testing.T) {
	pe, _, _ := newTestStressClient("localhost:8086")
	statementID := "foo_id"
	responseCode := 200
	responseTime := time.Duration(10 * time.Millisecond)
	addedTags := map[string]string{"foo_tag": "foo_tag_value"}
	writeBytes := 28051
	pt := pe.writePoint(1, statementID, responseCode, responseTime, addedTags, writeBytes)
	got := pt.Tags()["statement_id"]
	if statementID != got {
		t.Errorf("expected: %v\ngot: %v\n", statementID, got)
	}
	fields, err := pt.Fields()
	if err != nil {
		t.Fatal(err)
	}
	got2 := int(fields["status_code"].(int64))
	if responseCode != got2 {
		t.Errorf("expected: %v\ngot: %v\n", responseCode, got2)
	}
	expected := "write"
	got = pt.Name()
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

// TestQueryPoint checks measurement name, tags and fields of a query
// report point.
func TestQueryPoint(t *testing.T) {
	pe, _, _ := newTestStressClient("localhost:8086")
	statementID := "foo_id"
	responseCode := 200
	body := []byte{12}
	responseTime := time.Duration(10 * time.Millisecond)
	addedTags := map[string]string{"foo_tag": "foo_tag_value"}
	pt := pe.queryPoint(statementID, body, responseCode, responseTime, addedTags)
	got := pt.Tags()["statement_id"]
	if statementID != got {
		t.Errorf("expected: %v\ngot: %v\n", statementID, got)
	}
	fields, err := pt.Fields()
	if err != nil {
		t.Fatal(err)
	}
	got2 := int(fields["status_code"].(int64))
	if responseCode != got2 {
		t.Errorf("expected: %v\ngot: %v\n", responseCode, got2)
	}
	expected := "query"
	got = pt.Name()
	if expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

View File

@ -1,50 +0,0 @@
package stressClient
import (
"log"
influx "github.com/influxdata/influxdb/client/v2"
)
// Response holds data scraped from InfluxDB HTTP responses turned into
// a *influx.Point for reporting. See reporting.go for more information.
// The Tracer contains a wait group sent from the statement; it must be
// decremented when the Response is consumed.
type Response struct {
	Point  *influx.Point
	Tracer *Tracer
}

// NewResponse creates a new instance of Response.
func NewResponse(pt *influx.Point, tr *Tracer) Response {
	return Response{
		Point:  pt,
		Tracer: tr,
	}
}

// AddTags builds and returns a new point carrying the held point's data
// plus newTags; tags with the same name are overwritten by newTags.
func (resp Response) AddTags(newTags map[string]string) (*influx.Point, error) {
	// Pull off the current tags
	tags := resp.Point.Tags()
	// Add the new tags to the current tags
	for tag, tagValue := range newTags {
		tags[tag] = tagValue
	}
	// Make a new point
	fields, err := resp.Point.Fields()
	if err != nil {
		return nil, err
	}
	pt, err := influx.NewPoint(resp.Point.Name(), tags, fields, resp.Point.Time())
	// NOTE(review): despite the original "panic on error" comment this
	// terminates the process via log.Fatalf rather than panicking.
	if err != nil {
		log.Fatalf("Error adding tags to response point\n point: %v\n tags:%v\n error: %v\n", resp.Point, newTags, err)
	}
	return pt, nil
}

View File

@ -1,20 +0,0 @@
package stressClient
import (
"testing"
)
// TestNewResponse verifies that AddTags merges an extra tag into the point
// carried by a freshly constructed Response.
func TestNewResponse(t *testing.T) {
	resp := NewResponse(NewBlankTestPoint(), NewTracer(map[string]string{}))
	expected := "another_tag_value"
	pt, err := resp.AddTags(map[string]string{"another_tag": expected})
	if err != nil {
		t.Fatal(err)
	}
	if got := pt.Tags()["another_tag"]; expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

View File

@ -1,175 +0,0 @@
package stressClient
import (
"fmt"
"log"
"sync"
influx "github.com/influxdata/influxdb/client/v2"
)
// NewStressTest creates the backend for the stress test. It wires up the
// channels shared with the stressClient service, starts that service, and
// begins listening for results.
func NewStressTest() *StressTest {
	pkgCh := make(chan Package)
	dirCh := make(chan Directive)
	respCh := make(chan Response)

	// Results are reported back to a local InfluxDB instance.
	client, _ := influx.NewHTTPClient(influx.HTTPConfig{
		Addr: fmt.Sprintf("http://%v/", "localhost:8086"),
	})

	test := &StressTest{
		TestDB:        "_stressTest",
		Precision:     "s",
		StartDate:     "2016-01-02",
		BatchSize:     5000,
		packageChan:   pkgCh,
		directiveChan: dirCh,
		ResultsClient: client,
		ResultsChan:   respCh,
		communes:      make(map[string]*commune),
		TestID:        randStr(10),
	}

	// Start the client service.
	startStressClient(pkgCh, dirCh, respCh, test.TestID)

	// Listen for Results coming in.
	test.resultsListen()

	return test
}
// NewTestStressTest returns a StressTest suitable for testing Statements,
// along with the raw package and directive channels so tests can observe
// what the statements send. No client service or results listener is started.
func NewTestStressTest() (*StressTest, chan Package, chan Directive) {
	pkgCh := make(chan Package)
	dirCh := make(chan Directive)

	test := &StressTest{
		TestDB:        "_stressTest",
		Precision:     "s",
		StartDate:     "2016-01-02",
		BatchSize:     5000,
		directiveChan: dirCh,
		packageChan:   pkgCh,
		communes:      make(map[string]*commune),
		TestID:        randStr(10),
	}

	return test, pkgCh, dirCh
}
// The StressTest is the Statement facing API that consumes Statement output and coordinates the test results
type StressTest struct {
	TestID    string // random identifier attached to this run's reporting points
	TestDB    string // database where performance results are written
	Precision string // write precision exposed to statements
	StartDate string // start date exposed to statements
	BatchSize int    // batch size exposed to statements
	sync.WaitGroup
	sync.Mutex
	packageChan   chan<- Package   // outgoing writes/queries to the stressClient service
	directiveChan chan<- Directive // outgoing state changes to the stressClient service
	ResultsChan   chan Response    // incoming per-request reporting points
	communes      map[string]*commune
	ResultsClient influx.Client // client used to persist results to TestDB
}
// SendPackage is the public-facing API for handing query and write Packages
// to the stressClient service.
func (st *StressTest) SendPackage(p Package) {
	st.packageChan <- p
}
// SendDirective is the public-facing API for setting state variables on the
// stressClient service.
func (st *StressTest) SendDirective(d Directive) {
	st.directiveChan <- d
}
// resultsListen creates the results database and starts a goroutine that
// consumes Responses: reporting points are tagged and batched, and a "done"
// marker point flushes whatever is buffered.
func (st *StressTest) resultsListen() {
	st.createDatabase(st.TestDB)
	go func() {
		bp := st.NewResultsPointBatch()
		for resp := range st.ResultsChan {
			switch resp.Point.Name() {
			case "done":
				// Flush the remaining buffered points.
				// BUG FIX: the Write error was previously discarded; surface
				// it so lost performance stats are at least visible.
				if err := st.ResultsClient.Write(bp); err != nil {
					log.Printf("Error writing performance stats\n error: %v\n", err)
				}
				resp.Tracer.Done()
			default:
				// Add the StressTest tags
				pt, err := resp.AddTags(st.tags())
				if err != nil {
					panic(err)
				}
				// Add the point to the batch
				bp = st.batcher(pt, bp)
				resp.Tracer.Done()
			}
		}
	}()
}
// NewResultsPointBatch creates an empty nanosecond-precision batch targeting
// the results database.
func (st *StressTest) NewResultsPointBatch() influx.BatchPoints {
	conf := influx.BatchPointsConfig{
		Database:  st.TestDB,
		Precision: "ns",
	}
	// Error ignored: the config above is static and always valid.
	bp, _ := influx.NewBatchPoints(conf)
	return bp
}
// batcher adds pt to bp, flushing the batch to the results client once it
// grows past 5k points. It returns the batch that subsequent points should
// be added to.
func (st *StressTest) batcher(pt *influx.Point, bp influx.BatchPoints) influx.BatchPoints {
	if len(bp.Points()) <= 5000 {
		bp.AddPoint(pt)
	} else {
		err := st.ResultsClient.Write(bp)
		if err != nil {
			log.Fatalf("Error writing performance stats\n error: %v\n", err)
		}
		bp = st.NewResultsPointBatch()
		// BUG FIX: the point that triggered the flush was previously
		// discarded; carry it into the fresh batch.
		bp.AddPoint(pt)
	}
	return bp
}
// createDatabase creates db on the results server, exiting the process if
// the server is unreachable or rejects the statement.
func (st *StressTest) createDatabase(db string) {
	query := fmt.Sprintf("CREATE DATABASE %v", db)
	res, err := st.ResultsClient.Query(influx.Query{Command: query})
	if err != nil {
		log.Fatalf("error: no running influx server at localhost:8086")
	}
	// BUG FIX: this check was previously nested inside the err != nil branch
	// after log.Fatalf, so it could never run.
	if res.Error() != nil {
		log.Fatalf("error: no running influx server at localhost:8086")
	}
}
// GetStatementResults is a convenience function that fetches all stored
// results for statement sID from measurement t.
func (st *StressTest) GetStatementResults(sID, t string) (res []influx.Result) {
	query := fmt.Sprintf(`SELECT * FROM "%v" WHERE statement_id = '%v'`, t, sID)
	return st.queryTestResults(query)
}
// queryTestResults runs qry against the test results database and returns
// the results, or nil when the request fails or returns no series.
func (st *StressTest) queryTestResults(qry string) (res []influx.Result) {
	response, err := st.ResultsClient.Query(influx.Query{Command: qry, Database: st.TestDB})
	if err != nil {
		// BUG FIX: previously execution fell through on a transport error and
		// dereferenced the nil response below; return nil per the contract.
		return nil
	}
	if response.Error() != nil {
		log.Fatalf("Error sending results query\n error: %v\n", response.Error())
	}
	if response.Results[0].Series == nil {
		return nil
	}
	return response.Results
}

View File

@ -1,32 +0,0 @@
package stressClient
import (
"testing"
"time"
influx "github.com/influxdata/influxdb/client/v2"
)
// NewBlankTestPoint returns a fixed single-field point used as a fixture.
func NewBlankTestPoint() *influx.Point {
	utc, _ := time.LoadLocation("UTC")
	timestamp := time.Date(2016, time.Month(4), 20, 0, 0, 0, 0, utc)
	tags := map[string]string{"fooTag": "fooTagValue"}
	fields := map[string]interface{}{"value": 5920}
	pt, _ := influx.NewPoint("measurement", tags, fields, timestamp)
	return pt
}
// TestStressTestBatcher checks that batcher adds a point to a non-full batch.
func TestStressTestBatcher(t *testing.T) {
	sf, _, _ := NewTestStressTest()
	bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  sf.TestDB,
		Precision: "ns",
	})
	bp = sf.batcher(NewBlankTestPoint(), bp)
	if len(bp.Points()) != 1 {
		t.Fail()
	}
}

View File

@ -1,175 +0,0 @@
package stressClient
import (
"strings"
"sync"
)
// Type refers to the different Package types
type Type int

// There are two package types, Write and Query
const (
	// Write identifies a Package carrying points to be written.
	Write Type = iota
	// Query identifies a Package carrying a query to be executed.
	Query
)
// startStressClient launches the stressClient service: one goroutine that
// dispatches incoming Packages and one that applies state Directives.
func startStressClient(packageCh <-chan Package, directiveCh <-chan Directive, responseCh chan<- Response, testID string) {
	client := &stressClient{
		testID:        testID,
		addresses:     []string{"localhost:8086"},
		ssl:           false,
		username:      "",
		password:      "",
		precision:     "ns",
		database:      "stress",
		startDate:     "2016-01-01",
		qdelay:        "0s",
		wdelay:        "0s",
		wconc:         10,
		qconc:         5,
		packageChan:   packageCh,
		directiveChan: directiveCh,
		responseChan:  responseCh,
	}

	// Dispatch writes and queries.
	go client.listen()

	// Apply test-state changes.
	go client.directiveListen()
}
// stressClient holds the mutable state of the client service and the
// channels connecting it to the statements and to the reporting pipeline.
type stressClient struct {
	testID string
	// State for the Stress Test
	addresses []string // target InfluxDB instance(s), round-robined
	precision string   // write precision
	startDate string
	database  string // target database for writes and queries
	wdelay    string // optional delay between write batches (duration string)
	qdelay    string // optional delay between queries (duration string)
	username  string
	password  string
	ssl       bool // use https when true
	// Channels from statements
	packageChan   <-chan Package
	directiveChan <-chan Directive
	// Response channel
	responseChan chan<- Response
	// Concurrency utilities
	sync.WaitGroup
	sync.Mutex
	// Concurrency Limit for Writes and Reads
	wconc int
	qconc int
	// Manage Read and Write concurrency separately
	wc *ConcurrencyLimiter // writes
	rc *ConcurrencyLimiter // reads/queries
}
// newTestStressClient returns a stressClient preloaded with fixed test
// settings, plus its directive and package channels for use in tests.
func newTestStressClient(url string) (*stressClient, chan Directive, chan Package) {
	pkgChan := make(chan Package)
	dirChan := make(chan Directive)

	sc := &stressClient{
		testID:        "foo_id",
		addresses:     []string{url},
		precision:     "s",
		startDate:     "2016-01-01",
		database:      "fooDatabase",
		wdelay:        "50ms",
		qdelay:        "50ms",
		ssl:           false,
		username:      "",
		password:      "",
		wconc:         5,
		qconc:         5,
		packageChan:   pkgChan,
		directiveChan: dirChan,
		wc:            NewConcurrencyLimiter(1),
		rc:            NewConcurrencyLimiter(1),
	}

	return sc, dirChan, pkgChan
}
// listen consumes Packages from the main channel, dispatching each write or
// query in a bounded goroutine and round-robining across target addresses.
func (sc *stressClient) listen() {
	defer sc.Wait()
	sc.wc = NewConcurrencyLimiter(sc.wconc)
	sc.rc = NewConcurrencyLimiter(sc.qconc)
	l := NewConcurrencyLimiter((sc.wconc + sc.qconc) * 2)
	counter := 0
	for p := range sc.packageChan {
		l.Increment()
		// BUG FIX: compute the target server before spawning the goroutine;
		// previously counter was read inside the goroutine while the loop
		// incremented it, a data race.
		serv := counter % len(sc.addresses)
		go func(p Package, serv int) {
			defer l.Decrement()
			switch p.T {
			case Write:
				sc.spinOffWritePackage(p, serv)
			case Query:
				sc.spinOffQueryPackage(p, serv)
			}
		}(p, serv)
		counter++
	}
}
// directiveListen handles all SET requests for test state, applying each
// Directive under the client mutex and marking its Tracer done afterwards.
func (sc *stressClient) directiveListen() {
	for d := range sc.directiveChan {
		sc.Lock()
		switch d.Property {
		// addresses is a []string of target InfluxDB instance(s) for the test
		// comes in as a "|" separated array of addresses
		case "addresses":
			addr := strings.Split(d.Value, "|")
			sc.addresses = addr
		// precision is the write precision for InfluxDB
		case "precision":
			sc.precision = d.Value
		// writeinterval is an optional delay between batches
		case "writeinterval":
			sc.wdelay = d.Value
		// queryinterval is an optional delay between the batches
		case "queryinterval":
			sc.qdelay = d.Value
		// database is the InfluxDB database to target for both writes and queries
		case "database":
			sc.database = d.Value
		// username for the target database
		case "username":
			sc.username = d.Value
		// password for the target database
		case "password":
			sc.password = d.Value
		// use https if sent true
		// NOTE(review): ssl can only be switched on; any value other than
		// "true" leaves the current setting untouched — confirm intended.
		case "ssl":
			if d.Value == "true" {
				sc.ssl = true
			}
		// concurrency is the number concurrent writers to the database
		case "writeconcurrency":
			conc := parseInt(d.Value)
			sc.wconc = conc
			sc.wc.NewMax(conc)
		// concurrentqueries is the number of concurrent queriers database
		case "queryconcurrency":
			conc := parseInt(d.Value)
			sc.qconc = conc
			sc.rc.NewMax(conc)
		}
		d.Tracer.Done()
		sc.Unlock()
	}
}

View File

@ -1,69 +0,0 @@
package stressClient
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
)
// spinOffQueryPackage runs a query Package in its own goroutine, bounded by
// the read concurrency limiter.
func (sc *stressClient) spinOffQueryPackage(p Package, serv int) {
	sc.Add(1)
	sc.rc.Increment()
	go func() {
		defer sc.rc.Decrement()
		defer sc.Done()
		// Send the query.
		sc.prepareQuerySend(p, serv)
	}()
}
// prepareQuerySend builds the /query URL for the chosen server, issues the
// GET, and then enforces the configured query interval.
func (sc *stressClient) prepareQuerySend(p Package, serv int) {
	scheme := "http"
	if sc.ssl {
		scheme = "https"
	}
	queryURL := fmt.Sprintf("%v://%v/query?db=%v&q=%v&u=%v&p=%v",
		scheme, sc.addresses[serv], sc.database, url.QueryEscape(string(p.Body)), sc.username, sc.password)

	// Send the query.
	sc.makeGet(queryURL, p.StatementID, p.Tracer)

	// Query interval enforcement; an unparsable delay sleeps zero.
	qi, _ := time.ParseDuration(sc.qdelay)
	time.Sleep(qi)
}
// makeGet issues the GET request at addr, times it, and forwards a reporting
// point plus the raw body on the response channel.
func (sc *stressClient) makeGet(addr, statementID string, tr *Tracer) {
	// Make GET request
	t := time.Now()
	resp, err := http.Get(addr)
	elapsed := time.Since(t)
	if err != nil {
		// BUG FIX: previously execution fell through and dereferenced the nil
		// resp below. Release the tracer so the statement does not wait
		// forever on a response that will never be sent.
		log.Printf("Error making Query HTTP request\n error: %v\n", err)
		tr.Done()
		return
	}
	defer resp.Body.Close()
	// Read body and return it for Reporting
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("Error reading Query response body\n error: %v\n", err)
	}
	if resp.StatusCode != 200 {
		log.Printf("Query returned non 200 status\n status: %v\n error: %v\n", resp.StatusCode, string(body))
	}
	// Send the response
	sc.responseChan <- NewResponse(sc.queryPoint(statementID, body, resp.StatusCode, elapsed, tr.Tags), tr)
}

View File

@ -1,112 +0,0 @@
package stressClient
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
)
// ###############################################
// A selection of methods to manage the write path
// ###############################################
// spinOffWritePackage runs a write Package in its own goroutine, bounded by
// the write concurrency limiter, with retry starting at a tiny backoff.
func (sc *stressClient) spinOffWritePackage(p Package, serv int) {
	sc.Add(1)
	sc.wc.Increment()
	go func() {
		defer sc.wc.Decrement()
		defer sc.Done()
		sc.retry(p, time.Duration(time.Nanosecond), serv)
	}()
}
// retry makes one write attempt for p against server serv, reports the
// outcome, and recursively retries with arithmetically increasing backoff
// until the server answers 204.
func (sc *stressClient) retry(p Package, backoff time.Duration, serv int) {
	// Set Backoff Interval to 500ms
	backoffInterval := time.Duration(500 * time.Millisecond)
	// Arithmetic backoff for kicks
	bo := backoff + backoffInterval
	// Make the write request
	resp, elapsed, err := sc.prepareWrite(p.Body, serv)
	// Find number of times request has been retried
	numBackoffs := int(bo/backoffInterval) - 1
	// On 500 responses, resp == nil. This logic keeps program for panicing
	var statusCode int
	if resp == nil {
		statusCode = 500
	} else {
		statusCode = resp.StatusCode
	}
	// Make a point for reporting
	point := sc.writePoint(numBackoffs, p.StatementID, statusCode, elapsed, p.Tracer.Tags, len(p.Body))
	// Send the Response(point, tracer)
	sc.responseChan <- NewResponse(point, p.Tracer)
	// BatchInterval enforcement
	bi, _ := time.ParseDuration(sc.wdelay)
	time.Sleep(bi)
	// Retry if the statusCode was not 204 or the err != nil
	if !(statusCode == 204) || err != nil {
		// Increment the *Tracer waitgroup if we are going to retry the request
		p.Tracer.Add(1)
		// Log the error if there is one.
		// NOTE(review): err can be nil here (non-204 without a transport
		// error), in which case this prints "<nil>" — confirm intended.
		fmt.Println(err)
		// Backoff enforcement
		time.Sleep(bo)
		sc.retry(p, bo, serv)
	}
}
// prepareWrite builds the /write URL for the chosen server, POSTs the points,
// and returns the response together with the elapsed request time.
func (sc *stressClient) prepareWrite(points []byte, serv int) (*http.Response, time.Duration, error) {
	scheme := "http"
	if sc.ssl {
		scheme = "https"
	}
	address := fmt.Sprintf("%v://%v/write?db=%v&precision=%v&u=%v&p=%v",
		scheme, sc.addresses[serv], sc.database, sc.precision, sc.username, sc.password)

	// Time the request.
	start := time.Now()
	resp, err := makePost(address, bytes.NewBuffer(points))
	return resp, time.Since(start), err
}
// makePost sends points to url as a plain-text POST and verifies the write
// was accepted (HTTP 204). The response is returned for reporting.
func makePost(url string, points io.Reader) (*http.Response, error) {
	resp, err := http.Post(url, "text/plain", points)
	if err != nil {
		return resp, fmt.Errorf("Error making write POST request\n error: %v\n url: %v\n", err, url)
	}
	// BUG FIX: close the body on every path; previously the non-204 branch
	// returned without closing, leaking the connection.
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != 204 {
		return resp, fmt.Errorf("Write returned non-204 status code\n StatusCode: %v\n InfluxDB Error: %v\n", resp.StatusCode, string(body))
	}
	return resp, nil
}

View File

@ -1,19 +0,0 @@
package stressClient
import (
"sync"
)
// Tracer carries per-statement tags and a WaitGroup through the package life
// cycle so producers can wait until their work has been reported.
type Tracer struct {
	// Tags are attached to every reporting point produced for the statement.
	Tags map[string]string

	sync.WaitGroup
}

// NewTracer returns a Tracer carrying the supplied tags.
func NewTracer(tags map[string]string) *Tracer {
	t := &Tracer{}
	t.Tags = tags
	return t
}

View File

@ -1,17 +0,0 @@
package stressClient
import (
"testing"
)
// TestNewTracer checks tag storage and that the embedded WaitGroup works.
func TestNewTracer(t *testing.T) {
	want := "foo_tag_value"
	tracer := NewTracer(map[string]string{"foo_tag_key": want})
	if got := tracer.Tags["foo_tag_key"]; got != want {
		t.Errorf("expected: %v\ngot: %v", want, got)
	}
	tracer.Add(1)
	tracer.Done()
	tracer.Wait()
}

View File

@ -1,89 +0,0 @@
package stressClient
import (
"crypto/rand"
"fmt"
"log"
"strconv"
"sync"
)
// ###########################################
// ConcurrencyLimiter and associated methods #
// ###########################################
// ConcurrencyLimiter ensures that no more than a specified
// max number of goroutines are running.
type ConcurrencyLimiter struct {
	inc   chan chan struct{} // Increment requests; the inner chan signals admission
	dec   chan struct{}      // Decrement notifications (buffered to max)
	max   int                // admission ceiling; guarded by the embedded mutex
	count int                // current admissions; touched only by handleLimits
	sync.Mutex
}
// NewConcurrencyLimiter returns a configured limiter whose Increment blocks
// once max goroutines have been admitted without a matching Decrement.
func NewConcurrencyLimiter(max int) *ConcurrencyLimiter {
	limiter := &ConcurrencyLimiter{
		max: max,
		inc: make(chan chan struct{}),
		dec: make(chan struct{}, max),
	}
	go limiter.handleLimits()
	return limiter
}
// Increment will increase the count of running goroutines by 1.
// if the number is currently at the max, the call to Increment
// will block until another goroutine decrements.
func (c *ConcurrencyLimiter) Increment() {
	// Hand a ready channel to handleLimits and wait for it to signal that a
	// slot is available.
	r := make(chan struct{})
	c.inc <- r
	<-r
}
// Decrement will reduce the count of running goroutines by 1
func (c *ConcurrencyLimiter) Decrement() {
	// The buffered dec channel is drained by handleLimits when a blocked
	// Increment is waiting for a slot.
	c.dec <- struct{}{}
}
// NewMax resets the admission ceiling of the ConcurrencyLimiter.
func (c *ConcurrencyLimiter) NewMax(i int) {
	c.Lock()
	c.max = i
	c.Unlock()
}
// handleLimits runs in a goroutine to manage the count of
// running goroutines. Each value received on inc is a caller blocked in
// Increment; when the count is at max, a Decrement is awaited before the
// caller is admitted.
func (c *ConcurrencyLimiter) handleLimits() {
	for {
		r := <-c.inc
		c.Lock()
		if c.count >= c.max {
			// At capacity: block until some goroutine decrements.
			<-c.dec
			c.count--
		}
		c.Unlock()
		// count is only ever touched by this goroutine, so mutating it
		// outside the lock (which guards max) is safe.
		c.count++
		r <- struct{}{}
	}
}
// parseInt converts s to an int, exiting the process on malformed input.
func parseInt(s string) int {
	v, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		log.Fatalf("Error parsing integer:\n String: %v\n Error: %v\n", s, err)
	}
	return int(v)
}
// randStr returns n hexadecimal characters of cryptographically random data.
func randStr(n int) string {
	buf := make([]byte, n/2)
	_, _ = rand.Read(buf)
	return fmt.Sprintf("%x", buf)
}

View File

@ -1,158 +0,0 @@
package stressql
import (
"bufio"
"bytes"
"io"
"log"
"os"
"strings"
"github.com/influxdata/influxdb/stress/v2/statement"
stressql "github.com/influxdata/influxdb/stress/v2/stressql/statement"
"github.com/influxdata/influxql"
)
// Token represents a lexical token.
type Token int

// These are the lexical tokens used by the file parser
const (
	ILLEGAL   Token = iota // unrecognized input
	EOF                    // end of input
	STATEMENT              // a run of non-blank lines forming one statement
	BREAK                  // one or more blank lines separating statements
)
// eof is the sentinel rune returned by Scanner.read at end of input.
var eof = rune(0)

// check exits the process on a non-nil error; used for unrecoverable
// file-handling failures.
func check(e error) {
	if e != nil {
		log.Fatal(e)
	}
}
// isNewline reports whether r is a line feed.
func isNewline(r rune) bool {
	return '\n' == r
}
// Scanner scans the file and tokenizes the raw text
type Scanner struct {
	r *bufio.Reader // buffered source being tokenized
}
// NewScanner returns a Scanner reading from r.
func NewScanner(r io.Reader) *Scanner {
	s := &Scanner{}
	s.r = bufio.NewReader(r)
	return s
}
// read returns the next rune from the input, or eof once it is exhausted.
func (s *Scanner) read() rune {
	r, _, err := s.r.ReadRune()
	if err != nil {
		return eof
	}
	return r
}
// unread pushes the most recently read rune back onto the reader.
func (s *Scanner) unread() { _ = s.r.UnreadRune() }

// peek returns the next rune without consuming it.
func (s *Scanner) peek() rune {
	next := s.read()
	s.unread()
	return next
}
// Scan consumes the next token: a BREAK for blank-line runs, EOF at end of
// input, and a STATEMENT for everything else.
func (s *Scanner) Scan() (tok Token, lit string) {
	switch ch := s.read(); {
	case isNewline(ch):
		s.unread()
		return s.scanNewlines()
	case ch == eof:
		return EOF, ""
	default:
		s.unread()
		return s.scanStatements()
	}
}
// scanNewlines consumes a run of consecutive newlines as one BREAK token.
func (s *Scanner) scanNewlines() (tok Token, lit string) {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for {
		ch := s.read()
		if ch == eof {
			break
		}
		if !isNewline(ch) {
			s.unread()
			break
		}
		buf.WriteRune(ch)
	}
	return BREAK, buf.String()
}
// scanStatements consumes input up to a blank line (two consecutive
// newlines), returning it as one STATEMENT token.
func (s *Scanner) scanStatements() (tok Token, lit string) {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for {
		if ch := s.read(); ch == eof {
			break
		} else if isNewline(ch) && isNewline(s.peek()) {
			// Blank line terminates the statement.
			// NOTE(review): this unread is a no-op — peek() in the condition
			// already consumed the single bufio UnreadRune slot — so the
			// first of the two newlines is consumed but not included in the
			// literal. Confirm this is intended.
			s.unread()
			break
		} else if isNewline(ch) {
			// Interior newline: keep it in the statement text.
			// NOTE(review): as above, this unread silently fails after the
			// peek() in the condition, so the rune is written exactly once.
			s.unread()
			buf.WriteRune(ch)
		} else {
			buf.WriteRune(ch)
		}
	}
	return STATEMENT, buf.String()
}
// ParseStatements opens the config file and tokenizes it into a slice of
// statement.Statements: chunks that parse as InfluxQL become
// InfluxqlStatements, blank-line BREAKs are skipped, and everything else is
// delegated to the stressql statement parser.
func ParseStatements(file string) ([]statement.Statement, error) {
	seq := []statement.Statement{}
	f, err := os.Open(file)
	check(err)
	// BUG FIX: close the file on all return paths; previously an early error
	// return skipped the trailing f.Close() and leaked the descriptor.
	defer f.Close()
	s := NewScanner(f)
	for {
		t, l := s.Scan()
		if t == EOF {
			break
		}
		_, err := influxql.ParseStatement(l)
		if err == nil {
			seq = append(seq, &statement.InfluxqlStatement{Query: l, StatementID: stressql.RandStr(10)})
		} else if t == BREAK {
			continue
		} else {
			r := strings.NewReader(l)
			p := stressql.NewParser(r)
			stmt, err := p.Parse()
			if err != nil {
				return nil, err
			}
			seq = append(seq, stmt)
		}
	}
	return seq, nil
}

View File

@ -1,16 +0,0 @@
package stressql
import "testing"
// Pulls the default configFile and makes sure it parses
func TestParseStatements(t *testing.T) {
	stmts, err := ParseStatements("../iql/file.iql")
	if err != nil {
		t.Error(err)
	}
	const expected = 15
	if got := len(stmts); expected != got {
		t.Errorf("expected: %v\ngot: %v\n", expected, got)
	}
}

View File

@ -1,687 +0,0 @@
package statement
import (
"bufio"
"bytes"
"crypto/rand"
"fmt"
"io"
"log"
"strconv"
"strings"
"time"
"github.com/influxdata/influxdb/stress/v2/statement"
)
// Token represents a lexical token.
type Token int

// The following tokens represent the different values in the AST that make up stressql
const (
	ILLEGAL Token = iota // unrecognized input
	EOF                  // end of input
	WS                   // whitespace run

	literalBeg
	// IDENT and the following are InfluxQL literal tokens.
	IDENT       // main
	NUMBER      // 12345.67
	DURATIONVAL // 13h
	STRING      // "abc"
	BADSTRING   // "abc
	TEMPLATEVAR // %f
	literalEnd

	COMMA    // ,
	LPAREN   // (
	RPAREN   // )
	LBRACKET // [
	RBRACKET // ]
	PIPE     // |
	PERIOD   // .

	keywordBeg
	// Keywords recognized by the stressql grammar.
	SET
	USE
	QUERY
	INSERT
	GO
	DO
	WAIT
	STR
	INT
	FLOAT
	EXEC
	keywordEnd
)
// These assignments prevent static analysis tools highlighting lack of use of
// boundary constants.
var _, _ = literalBeg, literalEnd
var _, _ = keywordBeg, keywordEnd

// eof is the sentinel rune returned by Scanner.read at end of input.
var eof = rune(1)
// isWhitespace reports whether ch is a space, tab, or newline.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n':
		return true
	}
	return false
}
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
// isLetter reports whether ch is an ASCII letter; '@' is also treated as a
// letter so it can appear in identifiers.
func isLetter(ch rune) bool {
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	default:
		return ch == '@'
	}
}
// Scanner scans over the file and converts the raw text into tokens
type Scanner struct {
	r *bufio.Reader // buffered source being tokenized
}
// NewScanner returns a Scanner tokenizing from r.
func NewScanner(r io.Reader) *Scanner {
	s := &Scanner{}
	s.r = bufio.NewReader(r)
	return s
}
// read returns the next rune from the input, or eof once it is exhausted.
func (s *Scanner) read() rune {
	r, _, err := s.r.ReadRune()
	if err != nil {
		return eof
	}
	return r
}
// unread pushes the most recently read rune back onto the reader.
func (s *Scanner) unread() { _ = s.r.UnreadRune() }
// Scan consumes the next token from the input, returning its type and the
// literal text that produced it.
func (s *Scanner) Scan() (tok Token, lit string) {
	ch := s.read()

	// Multi-rune token classes are handed off to dedicated scanners.
	switch {
	case isWhitespace(ch):
		s.unread()
		return s.scanWhitespace()
	case isLetter(ch):
		s.unread()
		return s.scanIdent()
	case isDigit(ch):
		s.unread()
		return s.scanNumber()
	}

	// Single-rune tokens and special prefixes.
	switch ch {
	case eof:
		return EOF, ""
	case '"':
		// Quoted identifiers are scanned like bare ones.
		s.unread()
		return s.scanIdent()
	case '%':
		s.unread()
		return s.scanTemplateVar()
	case ',':
		return COMMA, ","
	case '.':
		return PERIOD, "."
	case '(':
		return LPAREN, "("
	case ')':
		return RPAREN, ")"
	case '[':
		return LBRACKET, "["
	case ']':
		return RBRACKET, "]"
	case '|':
		return PIPE, "|"
	}

	return ILLEGAL, string(ch)
}
// scanWhitespace consumes a contiguous whitespace run as one WS token.
func (s *Scanner) scanWhitespace() (tok Token, lit string) {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for {
		ch := s.read()
		if ch == eof {
			break
		}
		if !isWhitespace(ch) {
			s.unread()
			break
		}
		buf.WriteRune(ch)
	}
	return WS, buf.String()
}
// scanIdent consumes an identifier and, when it matches a stressql keyword
// (case-insensitively), returns that keyword's token instead of IDENT.
func (s *Scanner) scanIdent() (tok Token, lit string) {
	var buf bytes.Buffer
	buf.WriteRune(s.read())

	for {
		ch := s.read()
		if ch == eof {
			break
		}
		// Identifiers may contain letters, digits, and a few punctuation runes.
		if isLetter(ch) || isDigit(ch) || ch == '_' || ch == ':' || ch == '=' || ch == '-' {
			_, _ = buf.WriteRune(ch)
			continue
		}
		s.unread()
		break
	}

	lit = buf.String()
	switch strings.ToUpper(lit) {
	case "SET":
		return SET, lit
	case "USE":
		return USE, lit
	case "QUERY":
		return QUERY, lit
	case "INSERT":
		return INSERT, lit
	case "EXEC":
		return EXEC, lit
	case "WAIT":
		return WAIT, lit
	case "GO":
		return GO, lit
	case "DO":
		return DO, lit
	case "STR":
		return STR, lit
	case "FLOAT":
		return FLOAT, lit
	case "INT":
		return INT, lit
	}
	return IDENT, lit
}
// scanTemplateVar consumes a two-rune template variable such as "%f".
func (s *Scanner) scanTemplateVar() (tok Token, lit string) {
	var buf bytes.Buffer
	// Template variables are always exactly two runes: '%' plus one marker.
	for i := 0; i < 2; i++ {
		buf.WriteRune(s.read())
	}
	return TEMPLATEVAR, buf.String()
}
// scanNumber consumes a run of digits as a NUMBER token. If an 'n', 's', or
// 'm' immediately follows the digits, the token becomes a DURATIONVAL that
// includes that single suffix rune.
// NOTE(review): a multi-rune unit such as "ms" stops after the 'm'; the
// trailing 's' remains in the stream for the next Scan — confirm this is
// the intended duration grammar.
func (s *Scanner) scanNumber() (tok Token, lit string) {
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for {
		if ch := s.read(); ch == eof {
			break
		} else if ch == 'n' || ch == 's' || ch == 'm' {
			_, _ = buf.WriteRune(ch)
			return DURATIONVAL, buf.String()
		} else if !isDigit(ch) {
			s.unread()
			break
		} else {
			_, _ = buf.WriteRune(ch)
		}
	}
	return NUMBER, buf.String()
}
/////////////////////////////////
// PARSER ///////////////////////
/////////////////////////////////
// Parser turns the file from raw text into an AST
type Parser struct {
	s *Scanner // token source
	// buf is a one-token lookahead buffer used by unscan.
	buf struct {
		tok Token  // last scanned token
		lit string // last scanned literal
		n   int    // buffer size (0 or 1)
	}
}
// NewParser creates a Parser tokenizing from r.
func NewParser(r io.Reader) *Parser {
	p := &Parser{}
	p.s = NewScanner(r)
	return p
}
// Parse dispatches on the first keyword token to the matching
// statement-specific parser, erroring on anything unrecognized.
func (p *Parser) Parse() (statement.Statement, error) {
	tok, lit := p.scanIgnoreWhitespace()
	switch tok {
	case QUERY:
		p.unscan()
		return p.ParseQueryStatement()
	case INSERT:
		p.unscan()
		return p.ParseInsertStatement()
	case EXEC:
		p.unscan()
		return p.ParseExecStatement()
	case SET:
		p.unscan()
		return p.ParseSetStatement()
	case GO:
		p.unscan()
		return p.ParseGoStatement()
	case WAIT:
		p.unscan()
		return p.ParseWaitStatement()
	default:
		return nil, fmt.Errorf("Improper syntax\n unknown token found between statements, token: %v\n", lit)
	}
}
// ParseQueryStatement returns a QueryStatement parsed from
// "QUERY <name>\n<template query>\nDO <count>". Template variables in the
// query body are collected into Args and replaced by %v in TemplateString.
func (p *Parser) ParseQueryStatement() (*statement.QueryStatement, error) {
	stmt := &statement.QueryStatement{
		StatementID: RandStr(10),
	}
	// The statement must open with the QUERY keyword.
	if tok, lit := p.scanIgnoreWhitespace(); tok != QUERY {
		return nil, fmt.Errorf("Error parsing Query Statement\n Expected: QUERY\n Found: %v\n", lit)
	}
	// The statement name follows the keyword.
	tok, lit := p.scanIgnoreWhitespace()
	if tok != IDENT {
		return nil, fmt.Errorf("Error parsing Query Statement\n Expected: IDENT\n Found: %v\n", lit)
	}
	stmt.Name = lit
	for {
		tok, lit := p.scan()
		if tok == TEMPLATEVAR {
			// Placeholder: remember the variable and substitute %v.
			stmt.TemplateString += "%v"
			stmt.Args = append(stmt.Args, lit)
		} else if tok == DO {
			// "DO <count>" terminates the statement.
			tok, lit := p.scanIgnoreWhitespace()
			if tok != NUMBER {
				return nil, fmt.Errorf("Error parsing Query Statement\n Expected: NUMBER\n Found: %v\n", lit)
			}
			// Parse out the integer
			i, err := strconv.ParseInt(lit, 10, 64)
			if err != nil {
				log.Fatalf("Error parsing integer in Query Statement:\n string: %v\n error: %v\n", lit, err)
			}
			stmt.Count = int(i)
			break
		} else if tok == WS && lit == "\n" {
			// Skip bare newlines between the name and the query body.
			continue
		} else {
			// Everything else is literal query text.
			stmt.TemplateString += lit
		}
	}
	return stmt, nil
}
// ParseInsertStatement returns a InsertStatement parsed from
// "INSERT <name>\n<line-protocol template>\n<count> <duration>".
// Bracketed "[...]" sections become Templates; brackets seen before the
// first space count toward TagCount.
func (p *Parser) ParseInsertStatement() (*statement.InsertStatement, error) {
	// Initialize the InsertStatement with a statementId
	stmt := &statement.InsertStatement{
		StatementID: RandStr(10),
	}
	// If the first word is INSERT
	if tok, lit := p.scanIgnoreWhitespace(); tok != INSERT {
		return nil, fmt.Errorf("Error parsing Insert Statement\n Expected: INSERT\n Found: %v\n", lit)
	}
	// Next should come the NAME of the statement. It is IDENT type
	tok, lit := p.scanIgnoreWhitespace()
	if tok != IDENT {
		return nil, fmt.Errorf("Error parsing Insert Statement\n Expected: IDENT\n Found: %v\n", lit)
	}
	// Set the Name
	stmt.Name = lit
	// Next char should be a newline
	tok, lit = p.scan()
	if tok != WS {
		return nil, fmt.Errorf("Error parsing Insert Statement\n Expected: WS\n Found: %v\n", lit)
	}
	// We are now scanning the tags line
	var prev Token
	inTags := true
	for {
		// Start for loop by scanning
		tok, lit = p.scan()
		// If scaned is WS then we are just entering tags or leaving tags or fields
		if tok == WS {
			// If previous is COMMA then we are leaving measurement, continue
			if prev == COMMA {
				continue
			}
			// Otherwise we need to add a space to the template string and we are out of tags
			stmt.TemplateString += " "
			inTags = false
		} else if tok == LBRACKET {
			// If we are still inTags and there is a LBRACKET we are adding another template
			if inTags {
				stmt.TagCount++
			}
			// Add a space to fill template string with template result
			stmt.TemplateString += "%v"
			// parse template should return a template type
			expr, err := p.ParseTemplate()
			// If there is a Template parsing error return it
			if err != nil {
				return nil, err
			}
			// Add template to parsed select statement
			stmt.Templates = append(stmt.Templates, expr)
			// A number signifies that we are in the Timestamp section
		} else if tok == NUMBER {
			// Add a space to fill template string with timestamp
			stmt.TemplateString += "%v"
			p.unscan()
			// Parse out the Timestamp
			ts, err := p.ParseTimestamp()
			// If there is a Timestamp parsing error return it
			if err != nil {
				return nil, err
			}
			// Set the Timestamp
			stmt.Timestamp = ts
			// Break loop as InsertStatement ends
			break
		} else if tok != IDENT && tok != COMMA {
			return nil, fmt.Errorf("Error parsing Insert Statement\n Expected: IDENT or COMMA\n Found: %v\n", lit)
		} else {
			prev = tok
			stmt.TemplateString += lit
		}
	}
	return stmt, nil
}
// ParseTemplate parses the interior of a bracketed "[...]" insert-template
// section: bare identifiers become explicit tag values, and a STR/INT/FLOAT
// keyword introduces a generator function.
func (p *Parser) ParseTemplate() (*statement.Template, error) {
	// Blank template
	tmplt := &statement.Template{}
	for {
		// Scan to start loop
		tok, lit := p.scanIgnoreWhitespace()
		// If the tok == IDENT explicit tags are passed. Add them to the list of tags
		if tok == IDENT {
			tmplt.Tags = append(tmplt.Tags, lit)
			// Different flavors of functions
		} else if tok == INT || tok == FLOAT || tok == STR {
			p.unscan()
			// Parse out the function
			fn, err := p.ParseFunction()
			// If there is a Function parsing error return it
			if err != nil {
				return nil, err
			}
			// Set the Function on the Template
			tmplt.Function = fn
			// End of Function
		} else if tok == RBRACKET {
			break
		} else if tok == EOF {
			// BUG FIX: an unterminated template previously spun in this loop
			// forever, since EOF matched no case; report a parse error instead.
			return nil, fmt.Errorf("Error parsing Insert template\n Expected: RBRACKET\n Found: EOF\n")
		}
	}
	return tmplt, nil
}
// ParseExecStatement parses "EXEC <script>" into an ExecStatement.
// NOTE(review): only bare IDENT script names parse; full filesystem paths
// are not yet supported.
func (p *Parser) ParseExecStatement() (*statement.ExecStatement, error) {
	stmt := &statement.ExecStatement{StatementID: RandStr(10)}

	if tok, lit := p.scanIgnoreWhitespace(); tok != EXEC {
		return nil, fmt.Errorf("Error parsing Exec Statement\n Expected: EXEC\n Found: %v\n", lit)
	}

	tok, lit := p.scanIgnoreWhitespace()
	if tok != IDENT {
		return nil, fmt.Errorf("Error parsing Exec Statement\n Expected: IDENT\n Found: %v\n", lit)
	}
	stmt.Script = lit

	return stmt, nil
}
// ParseSetStatement parses "SET <var> [<value>]" into a SetStatement. The
// bracketed value may be built from identifiers, numbers, durations, and the
// separators '-', ':', '.', and '|'.
func (p *Parser) ParseSetStatement() (*statement.SetStatement, error) {
	stmt := &statement.SetStatement{
		StatementID: RandStr(10),
	}
	if tok, lit := p.scanIgnoreWhitespace(); tok != SET {
		return nil, fmt.Errorf("Error parsing Set Statement\n Expected: SET\n Found: %v\n", lit)
	}
	tok, lit := p.scanIgnoreWhitespace()
	if tok != IDENT {
		return nil, fmt.Errorf("Error parsing Set Statement\n Expected: IDENT\n Found: %v\n", lit)
	}
	stmt.Var = lit
	tok, lit = p.scanIgnoreWhitespace()
	if tok != LBRACKET {
		// BUG FIX: the message previously said "Expected: RBRACKET" even
		// though this check is for the opening bracket.
		return nil, fmt.Errorf("Error parsing Set Statement\n Expected: LBRACKET\n Found: %v\n", lit)
	}
	for {
		tok, lit = p.scanIgnoreWhitespace()
		if tok == RBRACKET {
			break
		} else if lit != "-" && lit != ":" && tok != IDENT && tok != NUMBER && tok != DURATIONVAL && tok != PERIOD && tok != PIPE {
			return nil, fmt.Errorf("Error parsing Set Statement\n Expected: IDENT || NUMBER || DURATION\n Found: %v\n", lit)
		}
		stmt.Value += lit
	}
	return stmt, nil
}
// ParseWaitStatement parses the bare "WAIT" keyword into a WaitStatement.
func (p *Parser) ParseWaitStatement() (*statement.WaitStatement, error) {
	stmt := &statement.WaitStatement{StatementID: RandStr(10)}

	if tok, lit := p.scanIgnoreWhitespace(); tok != WAIT {
		return nil, fmt.Errorf("Error parsing Wait Statement\n Expected: WAIT\n Found: %v\n", lit)
	}

	return stmt, nil
}
// ParseGoStatement parses "GO <query|insert|exec statement>" into a
// GoStatement wrapping the inner statement.
func (p *Parser) ParseGoStatement() (*statement.GoStatement, error) {
	stmt := &statement.GoStatement{}
	stmt.StatementID = RandStr(10)
	if tok, lit := p.scanIgnoreWhitespace(); tok != GO {
		return nil, fmt.Errorf("Error parsing Go Statement\n Expected: GO\n Found: %v\n", lit)
	}
	var body statement.Statement
	var err error
	tok, _ := p.scanIgnoreWhitespace()
	switch tok {
	case QUERY:
		p.unscan()
		body, err = p.ParseQueryStatement()
	case INSERT:
		p.unscan()
		body, err = p.ParseInsertStatement()
	case EXEC:
		p.unscan()
		body, err = p.ParseExecStatement()
	}
	// NOTE(review): any other token is silently swallowed here, leaving
	// stmt.Statement nil — confirm callers tolerate a GoStatement without a
	// body before tightening this to an error.
	if err != nil {
		return nil, err
	}
	stmt.Statement = body
	return stmt, nil
}
// ParseFunction parses "<type> <fn>(<argument>) <count>" — e.g.
// "int inc(0) 10000" — into a statement.Function.
func (p *Parser) ParseFunction() (*statement.Function, error) {
	fn := &statement.Function{}

	// The value type keyword (STR/INT/FLOAT) and the function name.
	_, lit := p.scanIgnoreWhitespace()
	fn.Type = lit
	_, lit = p.scanIgnoreWhitespace()
	fn.Fn = lit

	tok, lit := p.scanIgnoreWhitespace()
	if tok != LPAREN {
		return nil, fmt.Errorf("Error parsing Insert template function\n Expected: LPAREN\n Found: %v\n", lit)
	}
	tok, lit = p.scanIgnoreWhitespace()
	if tok != NUMBER {
		return nil, fmt.Errorf("Error parsing Insert template function\n Expected: NUMBER\n Found: %v\n", lit)
	}
	// Parse out the integer
	i, err := strconv.ParseInt(lit, 10, 64)
	if err != nil {
		log.Fatalf("Error parsing integer in Insert template function:\n string: %v\n error: %v\n", lit, err)
	}
	fn.Argument = int(i)
	// BUG FIX: the literal was previously discarded here ("tok, _ ="), so the
	// RPAREN error below reported the preceding NUMBER instead of the
	// offending token.
	tok, lit = p.scanIgnoreWhitespace()
	if tok != RPAREN {
		return nil, fmt.Errorf("Error parsing Insert template function\n Expected: RPAREN\n Found: %v\n", lit)
	}
	tok, lit = p.scanIgnoreWhitespace()
	if tok != NUMBER {
		return nil, fmt.Errorf("Error parsing Insert template function\n Expected: NUMBER\n Found: %v\n", lit)
	}
	// Parse out the integer
	i, err = strconv.ParseInt(lit, 10, 64)
	if err != nil {
		log.Fatalf("Error parsing integer in Insert template function:\n string: %v\n error: %v\n", lit, err)
	}
	fn.Count = int(i)
	return fn, nil
}
// ParseTimestamp parses "<count> <duration>" (e.g. "10000 1s") into a
// Timestamp describing how many points to emit and their spacing in time.
func (p *Parser) ParseTimestamp() (*statement.Timestamp, error) {
	ts := &statement.Timestamp{}
	tok, lit := p.scanIgnoreWhitespace()
	if tok != NUMBER {
		return nil, fmt.Errorf("Error parsing Insert timestamp\n Expected: NUMBER\n Found: %v\n", lit)
	}
	// Parse out the integer
	i, err := strconv.ParseInt(lit, 10, 64)
	if err != nil {
		log.Fatalf("Error parsing integer in Insert timestamp:\n string: %v\n error: %v\n", lit, err)
	}
	ts.Count = int(i)
	tok, lit = p.scanIgnoreWhitespace()
	if tok != DURATIONVAL {
		return nil, fmt.Errorf("Error parsing Insert timestamp\n Expected: DURATION\n Found: %v\n", lit)
	}
	// Parse out the duration
	dur, err := time.ParseDuration(lit)
	if err != nil {
		log.Fatalf("Error parsing duration in Insert timestamp:\n string: %v\n error: %v\n", lit, err)
	}
	ts.Duration = dur
	return ts, nil
}
// scan returns the next token and literal from the underlying scanner,
// first draining any token that was pushed back via unscan.
func (p *Parser) scan() (tok Token, lit string) {
	// A pushed-back token takes priority over reading a new one.
	if p.buf.n != 0 {
		p.buf.n = 0
		return p.buf.tok, p.buf.lit
	}
	// Read a fresh token and remember it so a later unscan can replay it.
	tok, lit = p.s.Scan()
	p.buf.tok, p.buf.lit = tok, lit
	return tok, lit
}
// scanIgnoreWhitespace returns the next token, skipping over a single
// leading whitespace token if one is present (the scanner is assumed to
// coalesce consecutive whitespace into one WS token).
func (p *Parser) scanIgnoreWhitespace() (Token, string) {
	tok, lit := p.scan()
	if tok != WS {
		return tok, lit
	}
	return p.scan()
}
// unscan pushes the most recently scanned token back onto the one-slot
// buffer so the next call to scan returns it again.
func (p *Parser) unscan() {
	p.buf.n = 1
}
// RandStr returns a string of random characters with length n
func RandStr(n int) string {
b := make([]byte, n/2)
_, _ = rand.Read(b)
return fmt.Sprintf("%x", b)
}

View File

@ -1,243 +0,0 @@
package statement
import (
// "fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/influxdata/influxdb/stress/v2/statement"
)
// newParserFromString is a test convenience that builds a Parser
// reading from the given input string.
func newParserFromString(s string) *Parser {
	return NewParser(strings.NewReader(s))
}
// TestParser_ParseStatement runs Parser.Parse over a table of QUERY,
// INSERT, EXEC, GO, SET and WAIT inputs and compares the parsed
// statement (or error string) with the expected result.
func TestParser_ParseStatement(t *testing.T) {
	var tests = []struct {
		skip bool                // true: case is known-broken and excluded from the run
		s    string              // input handed to the parser
		stmt statement.Statement // expected statement on success
		err  string              // expected error string on failure
	}{
		// QUERY
		{
			s:    "QUERY basicCount\nSELECT count(%f) FROM cpu\nDO 100",
			stmt: &statement.QueryStatement{Name: "basicCount", TemplateString: "SELECT count(%v) FROM cpu", Args: []string{"%f"}, Count: 100},
		},
		{
			s:    "QUERY basicCount\nSELECT count(%f) FROM %m\nDO 100",
			stmt: &statement.QueryStatement{Name: "basicCount", TemplateString: "SELECT count(%v) FROM %v", Args: []string{"%f", "%m"}, Count: 100},
		},
		{
			skip: true, // SHOULD CAUSE AN ERROR
			s:    "QUERY\nSELECT count(%f) FROM %m\nDO 100",
			err:  "Missing Name",
		},
		// INSERT
		{
			s: "INSERT mockCpu\ncpu,\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\n100000 10s",
			stmt: &statement.InsertStatement{
				Name:           "mockCpu",
				TemplateString: "cpu,host=%v,server_id=%v busy=%v,free=%v %v",
				TagCount:       2,
				Templates: []*statement.Template{
					&statement.Template{
						Tags: []string{"us-west", "us-east", "eu-north"},
					},
					&statement.Template{
						Function: &statement.Function{Type: "str", Fn: "rand", Argument: 7, Count: 1000},
					},
					&statement.Template{
						Function: &statement.Function{Type: "int", Fn: "rand", Argument: 1000, Count: 100},
					},
					&statement.Template{
						Function: &statement.Function{Type: "float", Fn: "rand", Argument: 10, Count: 0},
					},
				},
				Timestamp: &statement.Timestamp{
					Count:    100000,
					Duration: time.Duration(10 * time.Second),
				},
			},
		},
		{
			s: "INSERT mockCpu\ncpu,host=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\n100000 10s",
			stmt: &statement.InsertStatement{
				Name:           "mockCpu",
				TemplateString: "cpu,host=%v,server_id=%v busy=%v,free=%v %v",
				TagCount:       2,
				Templates: []*statement.Template{
					&statement.Template{
						Tags: []string{"us-west", "us-east", "eu-north"},
					},
					&statement.Template{
						Function: &statement.Function{Type: "str", Fn: "rand", Argument: 7, Count: 1000},
					},
					&statement.Template{
						Function: &statement.Function{Type: "int", Fn: "rand", Argument: 1000, Count: 100},
					},
					&statement.Template{
						Function: &statement.Function{Type: "float", Fn: "rand", Argument: 10, Count: 0},
					},
				},
				Timestamp: &statement.Timestamp{
					Count:    100000,
					Duration: time.Duration(10 * time.Second),
				},
			},
		},
		{
			s: "INSERT mockCpu\n[str rand(1000) 10],\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000],other=x\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\n100000 10s",
			stmt: &statement.InsertStatement{
				Name:           "mockCpu",
				TemplateString: "%v,host=%v,server_id=%v,other=x busy=%v,free=%v %v",
				TagCount:       3,
				Templates: []*statement.Template{
					&statement.Template{
						Function: &statement.Function{Type: "str", Fn: "rand", Argument: 1000, Count: 10},
					},
					&statement.Template{
						Tags: []string{"us-west", "us-east", "eu-north"},
					},
					&statement.Template{
						Function: &statement.Function{Type: "str", Fn: "rand", Argument: 7, Count: 1000},
					},
					&statement.Template{
						Function: &statement.Function{Type: "int", Fn: "rand", Argument: 1000, Count: 100},
					},
					&statement.Template{
						Function: &statement.Function{Type: "float", Fn: "rand", Argument: 10, Count: 0},
					},
				},
				Timestamp: &statement.Timestamp{
					Count:    100000,
					Duration: time.Duration(10 * time.Second),
				},
			},
		},
		{
			skip: true, // Expected error not working
			s:    "INSERT\ncpu,\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\n100000 10s",
			err:  `found ",", expected WS`,
		},
		// EXEC
		{
			s:    `EXEC other_script`,
			stmt: &statement.ExecStatement{Script: "other_script"},
		},
		{
			skip: true, // Implement
			s:    `EXEC other_script.sh`,
			stmt: &statement.ExecStatement{Script: "other_script.sh"},
		},
		{
			skip: true, // Implement
			s:    `EXEC ../other_script.sh`,
			stmt: &statement.ExecStatement{Script: "../other_script.sh"},
		},
		{
			skip: true, // Implement
			s:    `EXEC /path/to/some/other_script.sh`,
			stmt: &statement.ExecStatement{Script: "/path/to/some/other_script.sh"},
		},
		// GO
		{
			skip: true,
			s:    "GO INSERT mockCpu\ncpu,\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\n100000 10s",
			stmt: &statement.GoStatement{
				Statement: &statement.InsertStatement{
					Name:           "mockCpu",
					TemplateString: "cpu,host=%v,server_id=%v busy=%v,free=%v %v",
					Templates: []*statement.Template{
						&statement.Template{
							Tags: []string{"us-west", "us-east", "eu-north"},
						},
						&statement.Template{
							Function: &statement.Function{Type: "str", Fn: "rand", Argument: 7, Count: 1000},
						},
						&statement.Template{
							Function: &statement.Function{Type: "int", Fn: "rand", Argument: 1000, Count: 100},
						},
						&statement.Template{
							Function: &statement.Function{Type: "float", Fn: "rand", Argument: 10, Count: 0},
						},
					},
					Timestamp: &statement.Timestamp{
						Count:    100000,
						Duration: time.Duration(10 * time.Second),
					},
				},
			},
		},
		{
			skip: true,
			s:    "GO QUERY basicCount\nSELECT count(free) FROM cpu\nDO 100",
			stmt: &statement.GoStatement{
				Statement: &statement.QueryStatement{Name: "basicCount", TemplateString: "SELECT count(free) FROM cpu", Count: 100},
			},
		},
		{
			skip: true,
			s:    `GO EXEC other_script`,
			stmt: &statement.GoStatement{
				Statement: &statement.ExecStatement{Script: "other_script"},
			},
		},
		// SET
		{
			s:    `SET database [stress]`,
			stmt: &statement.SetStatement{Var: "database", Value: "stress"},
		},
		// WAIT
		{
			s:    `Wait`,
			stmt: &statement.WaitStatement{},
		},
	}

	for _, tst := range tests {
		if tst.skip {
			continue
		}
		stmt, err := newParserFromString(tst.s).Parse()
		if err != nil {
			// A parse error is acceptable only when this case expects
			// exactly this error. (The original loop also reported a
			// failure when the error MATCHED the expectation, so every
			// expected-error case would have failed.)
			if err.Error() != tst.err {
				t.Errorf("REAL ERROR: %v\nExpected ERROR: %v\n", err, tst.err)
			}
			continue
		}
		if tst.err != "" {
			t.Errorf("expected error %q, got none", tst.err)
			continue
		}
		// Normalize both IDs before the deep comparison. SetID runs
		// only on the success path, so error cases can leave tst.stmt
		// nil without panicking.
		tst.stmt.SetID("x")
		stmt.SetID("x")
		if !reflect.DeepEqual(stmt, tst.stmt) {
			t.Errorf("Expected\n%#v\n%#v", tst.stmt, stmt)
		}
	}
}