From 886046ed9a508315a23d92f04bbfc1881e53ba3a Mon Sep 17 00:00:00 2001 From: Tim Raymond Date: Tue, 15 Aug 2017 17:30:29 -0400 Subject: [PATCH] Switch KapacitorRulesGet to use Pagination Kapacitor responses are paginated, and sometimes users have more than the default 100 tasks that are returned from Kapacitor. This replaces the previous Kapa client with one that automatically follows paginated responses from Kapacitor's ListTasks endpoint and returns the full response. Tests for the KapacitorRulesGet endpoint had to be updated because they did not account for "limit" and "offset", and so led to an infinite loop with the paginated client. A correct Kapacitor backend will not have this behavior. --- kapacitor/client.go | 12 +++++++++++- kapacitor/kapa_client.go | 4 +++- kapacitor/kapa_client_test.go | 4 ++-- server/kapacitors_test.go | 31 ++++++++++++++++++++++++++++--- 4 files changed, 44 insertions(+), 7 deletions(-) diff --git a/kapacitor/client.go b/kapacitor/client.go index 3f516d8ee..b868a5c57 100644 --- a/kapacitor/client.go +++ b/kapacitor/client.go @@ -12,6 +12,10 @@ import ( const ( // Prefix is prepended to the ID of all alerts Prefix = "chronograf-v1-" + + // FetchRate is the rate at which paginated responses will be consumed + // from a Kapacitor + FetchRate = 100 ) // Client communicates to kapacitor @@ -325,8 +329,14 @@ func NewKapaClient(url, username, password string) (KapaClient, error) { } } - return client.New(client.Config{ + clnt, err := client.New(client.Config{ URL: url, Credentials: creds, }) + + if err != nil { + return clnt, err + } + + return &PaginatingKapaClient{clnt, FetchRate}, nil } diff --git a/kapacitor/kapa_client.go b/kapacitor/kapa_client.go index 773de7919..603bb486b 100644 --- a/kapacitor/kapa_client.go +++ b/kapacitor/kapa_client.go @@ -1,6 +1,8 @@ package kapacitor -import client "github.com/influxdata/kapacitor/client/v1" +import ( + client "github.com/influxdata/kapacitor/client/v1" +) // ensure PaginatingKapaClient
is a KapaClient var _ KapaClient = &PaginatingKapaClient{} diff --git a/kapacitor/kapa_client_test.go b/kapacitor/kapa_client_test.go index a615b3c4f..a1d790fcd 100644 --- a/kapacitor/kapa_client_test.go +++ b/kapacitor/kapa_client_test.go @@ -9,7 +9,7 @@ import ( ) func Test_Kapacitor_PaginatingKapaClient(t *testing.T) { - const lenAllTasks = 200 + const lenAllTasks = 227 // prime, to stress odd result sets // create a mock client that will return a huge response from ListTasks mockClient := &mocks.KapaClient{ @@ -30,7 +30,7 @@ func Test_Kapacitor_PaginatingKapaClient(t *testing.T) { }, } - pkap := kapacitor.PaginatingKapaClient{mockClient, 100} + pkap := kapacitor.PaginatingKapaClient{mockClient, 50} opts := &client.ListTasksOptions{ Limit: 100, diff --git a/server/kapacitors_test.go b/server/kapacitors_test.go index 7869ed81e..6d4ba76e7 100644 --- a/server/kapacitors_test.go +++ b/server/kapacitors_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "strconv" "strings" "testing" @@ -106,6 +107,18 @@ func Test_KapacitorRulesGet(t *testing.T) { // setup mock kapa API kapaSrv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + params := r.URL.Query() + limit, err := strconv.Atoi(params.Get("limit")) + if err != nil { + rw.WriteHeader(http.StatusBadRequest) + return + } + offset, err := strconv.Atoi(params.Get("offset")) + if err != nil { + rw.WriteHeader(http.StatusBadRequest) + return + } + tsks := []map[string]interface{}{} for _, task := range test.mockAlerts { tsks = append(tsks, map[string]interface{}{ @@ -119,11 +132,23 @@ func Test_KapacitorRulesGet(t *testing.T) { }) } - tasks := map[string]interface{}{ - "tasks": tsks, + var tasks map[string]interface{} + + if offset >= len(tsks) { + tasks = map[string]interface{}{ + "tasks": []map[string]interface{}{}, + } + } else if limit+offset > len(tsks) { + tasks = map[string]interface{}{ + "tasks": tsks[offset:], + } + } else { + tasks = 
map[string]interface{}{ + "tasks": tsks[offset : offset+limit], + } } - err := json.NewEncoder(rw).Encode(&tasks) + err = json.NewEncoder(rw).Encode(&tasks) if err != nil { t.Error("Failed to encode JSON. err:", err) }