Switch KapacitorRulesGet to use Pagination
Kapacitor responses are paginated, and sometimes users have more than the default 100 tasks that are returned from Kapacitor. This replaces the previous Kapa client with one that automatically follows paginated responses from Kapacitor's ListTasks endpoint and returns the full response. Tests for the KapacitorRulesGet endpoint had to be updated because they did not account for "limit" and "offset", and so led to an infinite loop with the paginated client. A correct Kapacitor backend will not have this behavior.
pull/1886/head
parent
d0be50ab37
commit
886046ed9a
|
@ -12,6 +12,10 @@ import (
|
|||
const (
|
||||
// Prefix is prepended to the ID of all alerts
|
||||
Prefix = "chronograf-v1-"
|
||||
|
||||
// FetchRate is the rate at which paginated responses will be consumed
|
||||
// from a Kapacitor
|
||||
FetchRate = 100
|
||||
)
|
||||
|
||||
// Client communicates to kapacitor
|
||||
|
@ -325,8 +329,14 @@ func NewKapaClient(url, username, password string) (KapaClient, error) {
|
|||
}
|
||||
}
|
||||
|
||||
return client.New(client.Config{
|
||||
clnt, err := client.New(client.Config{
|
||||
URL: url,
|
||||
Credentials: creds,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return clnt, err
|
||||
}
|
||||
|
||||
return &PaginatingKapaClient{clnt, FetchRate}, nil
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package kapacitor
|
||||
|
||||
import client "github.com/influxdata/kapacitor/client/v1"
|
||||
import (
|
||||
client "github.com/influxdata/kapacitor/client/v1"
|
||||
)
|
||||
|
||||
// ensure PaginatingKapaClient is a KapaClient
|
||||
var _ KapaClient = &PaginatingKapaClient{}
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
)
|
||||
|
||||
func Test_Kapacitor_PaginatingKapaClient(t *testing.T) {
|
||||
const lenAllTasks = 200
|
||||
const lenAllTasks = 227 // prime, to stress odd result sets
|
||||
|
||||
// create a mock client that will return a huge response from ListTasks
|
||||
mockClient := &mocks.KapaClient{
|
||||
|
@ -30,7 +30,7 @@ func Test_Kapacitor_PaginatingKapaClient(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
pkap := kapacitor.PaginatingKapaClient{mockClient, 100}
|
||||
pkap := kapacitor.PaginatingKapaClient{mockClient, 50}
|
||||
|
||||
opts := &client.ListTasksOptions{
|
||||
Limit: 100,
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -106,6 +107,18 @@ func Test_KapacitorRulesGet(t *testing.T) {
|
|||
|
||||
// setup mock kapa API
|
||||
kapaSrv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
params := r.URL.Query()
|
||||
limit, err := strconv.Atoi(params.Get("limit"))
|
||||
if err != nil {
|
||||
rw.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
offset, err := strconv.Atoi(params.Get("offset"))
|
||||
if err != nil {
|
||||
rw.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
tsks := []map[string]interface{}{}
|
||||
for _, task := range test.mockAlerts {
|
||||
tsks = append(tsks, map[string]interface{}{
|
||||
|
@ -119,11 +132,23 @@ func Test_KapacitorRulesGet(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
tasks := map[string]interface{}{
|
||||
"tasks": tsks,
|
||||
var tasks map[string]interface{}
|
||||
|
||||
if offset >= len(tsks) {
|
||||
tasks = map[string]interface{}{
|
||||
"tasks": []map[string]interface{}{},
|
||||
}
|
||||
} else if limit+offset > len(tsks) {
|
||||
tasks = map[string]interface{}{
|
||||
"tasks": tsks[offset:],
|
||||
}
|
||||
} else {
|
||||
tasks = map[string]interface{}{
|
||||
"tasks": tsks[offset : offset+limit],
|
||||
}
|
||||
}
|
||||
|
||||
err := json.NewEncoder(rw).Encode(&tasks)
|
||||
err = json.NewEncoder(rw).Encode(&tasks)
|
||||
if err != nil {
|
||||
t.Error("Failed to encode JSON. err:", err)
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue