fix: remove nats for scraper processing (#23107)
* fix: remove nats for scraper processing Scrapers now use go channels instead of NATS and interprocess communication. This should fix #23085 . Additionally, found and fixed #23106 . * chore: fix formatting * chore: fix static check and go.mod * test: fix some flaky tests * fix: mark NATS arguments as deprecatedpull/23120/head
parent
f68758bd94
commit
e20b5e99a6
|
@ -14,13 +14,11 @@ import (
|
|||
"github.com/influxdata/influxdb/v2/kit/cli"
|
||||
"github.com/influxdata/influxdb/v2/kit/signals"
|
||||
influxlogger "github.com/influxdata/influxdb/v2/logger"
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
"github.com/influxdata/influxdb/v2/pprof"
|
||||
"github.com/influxdata/influxdb/v2/sqlite"
|
||||
"github.com/influxdata/influxdb/v2/storage"
|
||||
"github.com/influxdata/influxdb/v2/v1/coordinator"
|
||||
"github.com/influxdata/influxdb/v2/vault"
|
||||
natsserver "github.com/nats-io/gnatsd/server"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap/zapcore"
|
||||
|
@ -226,8 +224,8 @@ func NewOpts(viper *viper.Viper) *InfluxdOpts {
|
|||
StoreType: DiskStore,
|
||||
SecretStore: BoltStore,
|
||||
|
||||
NatsPort: nats.RandomPort,
|
||||
NatsMaxPayloadBytes: natsserver.MAX_PAYLOAD_SIZE,
|
||||
NatsPort: 0,
|
||||
NatsMaxPayloadBytes: 0,
|
||||
|
||||
NoTasks: false,
|
||||
|
||||
|
@ -595,14 +593,16 @@ func (o *InfluxdOpts) BindCliOpts() []cli.Opt {
|
|||
{
|
||||
DestP: &o.NatsPort,
|
||||
Flag: "nats-port",
|
||||
Desc: fmt.Sprintf("Port that should be bound by the NATS streaming server. A value of %d will cause a random port to be selected.", nats.RandomPort),
|
||||
Desc: "deprecated: nats has been replaced",
|
||||
Default: o.NatsPort,
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
DestP: &o.NatsMaxPayloadBytes,
|
||||
Flag: "nats-max-payload-bytes",
|
||||
Desc: "The maximum number of bytes allowed in a NATS message payload.",
|
||||
Desc: "deprecated: nats has been replaced",
|
||||
Default: o.NatsMaxPayloadBytes,
|
||||
Hidden: true,
|
||||
},
|
||||
|
||||
// Pprof config
|
||||
|
|
|
@ -43,7 +43,6 @@ import (
|
|||
"github.com/influxdata/influxdb/v2/kv/migration"
|
||||
"github.com/influxdata/influxdb/v2/kv/migration/all"
|
||||
"github.com/influxdata/influxdb/v2/label"
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
"github.com/influxdata/influxdb/v2/notebooks"
|
||||
notebookTransport "github.com/influxdata/influxdb/v2/notebooks/transport"
|
||||
endpointservice "github.com/influxdata/influxdb/v2/notification/endpoint/service"
|
||||
|
@ -220,6 +219,14 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
|
|||
m.log.Debug("loaded config file", zap.String("path", p))
|
||||
}
|
||||
|
||||
if opts.NatsPort != 0 {
|
||||
m.log.Warn("nats-port argument is deprecated and unused")
|
||||
}
|
||||
|
||||
if opts.NatsMaxPayloadBytes != 0 {
|
||||
m.log.Warn("nats-max-payload-bytes argument is deprecated and unused")
|
||||
}
|
||||
|
||||
// Parse feature flags.
|
||||
// These flags can be used to modify the remaining setup logic in this method.
|
||||
// They will also be injected into the contexts of incoming HTTP requests at runtime,
|
||||
|
@ -281,7 +288,7 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
|
|||
|
||||
secretStore, err := secret.NewStore(m.kvStore)
|
||||
if err != nil {
|
||||
m.log.Error("Failed creating new meta store", zap.Error(err))
|
||||
m.log.Error("Failed creating new secret store", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -556,53 +563,18 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
|
|||
telegrafSvc = telegrafservice.New(m.kvStore)
|
||||
}
|
||||
|
||||
// NATS streaming server
|
||||
natsOpts := nats.NewDefaultServerOptions()
|
||||
natsOpts.Port = opts.NatsPort
|
||||
natsOpts.MaxPayload = opts.NatsMaxPayloadBytes
|
||||
natsServer := nats.NewServer(&natsOpts, m.log.With(zap.String("service", "nats")))
|
||||
if err := natsServer.Open(); err != nil {
|
||||
m.log.Error("Failed to start nats streaming server", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
m.closers = append(m.closers, labeledCloser{
|
||||
label: "nats",
|
||||
closer: func(context.Context) error {
|
||||
natsServer.Close()
|
||||
return nil
|
||||
},
|
||||
})
|
||||
// If a random port was used, the opts will be updated to include the selected value.
|
||||
natsURL := fmt.Sprintf("http://127.0.0.1:%d", natsOpts.Port)
|
||||
publisher := nats.NewAsyncPublisher(m.log, fmt.Sprintf("nats-publisher-%d", natsOpts.Port), natsURL)
|
||||
if err := publisher.Open(); err != nil {
|
||||
m.log.Error("Failed to connect to streaming server", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(jm): this is an example of using a subscriber to consume from the channel. It should be removed.
|
||||
subscriber := nats.NewQueueSubscriber(fmt.Sprintf("nats-subscriber-%d", natsOpts.Port), natsURL)
|
||||
if err := subscriber.Open(); err != nil {
|
||||
m.log.Error("Failed to connect to streaming server", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
subscriber.Subscribe(gather.MetricsSubject, "metrics", gather.NewRecorderHandler(m.log, gather.PointWriter{Writer: pointsWriter}))
|
||||
scraperScheduler, err := gather.NewScheduler(m.log, 10, scraperTargetSvc, publisher, subscriber, 10*time.Second, 30*time.Second)
|
||||
scraperScheduler, err := gather.NewScheduler(m.log.With(zap.String("service", "scraper")), 100, 10, scraperTargetSvc, pointsWriter, 10*time.Second)
|
||||
if err != nil {
|
||||
m.log.Error("Failed to create scraper subscriber", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
m.wg.Add(1)
|
||||
go func(log *zap.Logger) {
|
||||
defer m.wg.Done()
|
||||
log = log.With(zap.String("service", "scraper"))
|
||||
if err := scraperScheduler.Run(ctx); err != nil {
|
||||
log.Error("Failed scraper service", zap.Error(err))
|
||||
}
|
||||
log.Info("Stopping")
|
||||
}(m.log)
|
||||
m.closers = append(m.closers, labeledCloser{
|
||||
label: "scraper",
|
||||
closer: func(ctx context.Context) error {
|
||||
scraperScheduler.Close()
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
var sessionSvc platform.SessionService
|
||||
{
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/influxdata/influxdb/v2/http"
|
||||
"github.com/influxdata/influxdb/v2/tsdb"
|
||||
"github.com/influxdata/influxdb/v2/v1/services/meta"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -210,6 +211,9 @@ func TestLauncher_BucketDelete(t *testing.T) {
|
|||
if got, exp := engine.SeriesCardinality(ctx, l.Bucket.ID), int64(0); got != exp {
|
||||
t.Fatalf("after bucket delete got %d, exp %d", got, exp)
|
||||
}
|
||||
|
||||
databaseInfo := engine.MetaClient().Database(l.Bucket.ID.String())
|
||||
assert.Nil(t, databaseInfo)
|
||||
}
|
||||
|
||||
func TestLauncher_DeleteWithPredicate(t *testing.T) {
|
||||
|
|
|
@ -1,50 +0,0 @@
|
|||
package gather
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// handler implements nats Handler interface.
|
||||
type handler struct {
|
||||
Scraper Scraper
|
||||
Publisher nats.Publisher
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
// Process consumes scraper target from scraper target queue,
|
||||
// call the scraper to gather, and publish to metrics queue.
|
||||
func (h *handler) Process(s nats.Subscription, m nats.Message) {
|
||||
defer m.Ack()
|
||||
|
||||
req := new(influxdb.ScraperTarget)
|
||||
err := json.Unmarshal(m.Data(), req)
|
||||
if err != nil {
|
||||
h.log.Error("Unable to unmarshal json", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
ms, err := h.Scraper.Gather(context.TODO(), *req)
|
||||
if err != nil {
|
||||
h.log.Error("Unable to gather", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
// send metrics to recorder queue
|
||||
buf := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(buf).Encode(ms); err != nil {
|
||||
h.log.Error("Unable to marshal json", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.Publisher.Publish(MetricsSubject, buf); err != nil {
|
||||
h.log.Error("Unable to publish scraper metrics", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
}
|
|
@ -1,7 +1,6 @@
|
|||
package gather
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -32,7 +31,7 @@ func newPrometheusScraper() *prometheusScraper {
|
|||
}
|
||||
|
||||
// Gather parse metrics from a scraper target url.
|
||||
func (p *prometheusScraper) Gather(ctx context.Context, target influxdb.ScraperTarget) (collected MetricsCollection, err error) {
|
||||
func (p *prometheusScraper) Gather(target influxdb.ScraperTarget) (collected MetricsCollection, err error) {
|
||||
var (
|
||||
resp *http.Response
|
||||
)
|
||||
|
|
|
@ -1,59 +0,0 @@
|
|||
package gather
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
"github.com/influxdata/influxdb/v2/storage"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// PointWriter will use the storage.PointWriter interface to record metrics.
|
||||
type PointWriter struct {
|
||||
Writer storage.PointsWriter
|
||||
}
|
||||
|
||||
// Record the metrics and write using storage.PointWriter interface.
|
||||
func (s PointWriter) Record(collected MetricsCollection) error {
|
||||
ps, err := collected.MetricsSlice.Points()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.Writer.WritePoints(context.Background(), collected.OrgID, collected.BucketID, ps)
|
||||
}
|
||||
|
||||
// Recorder record the metrics of a time based.
|
||||
type Recorder interface {
|
||||
// Subscriber nats.Subscriber
|
||||
Record(collected MetricsCollection) error
|
||||
}
|
||||
|
||||
// RecorderHandler implements nats.Handler interface.
|
||||
type RecorderHandler struct {
|
||||
Recorder Recorder
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
func NewRecorderHandler(log *zap.Logger, recorder Recorder) *RecorderHandler {
|
||||
return &RecorderHandler{
|
||||
Recorder: recorder,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
// Process consumes job queue, and use recorder to record.
|
||||
func (h *RecorderHandler) Process(s nats.Subscription, m nats.Message) {
|
||||
defer m.Ack()
|
||||
collected := new(MetricsCollection)
|
||||
err := json.Unmarshal(m.Data(), &collected)
|
||||
if err != nil {
|
||||
h.log.Error("Recorder handler error", zap.Error(err))
|
||||
return
|
||||
}
|
||||
err = h.Recorder.Record(*collected)
|
||||
if err != nil {
|
||||
h.log.Error("Recorder handler error", zap.Error(err))
|
||||
}
|
||||
}
|
|
@ -1,135 +1,136 @@
|
|||
package gather
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kit/tracing"
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
"github.com/influxdata/influxdb/v2/storage"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// nats subjects
|
||||
const (
|
||||
MetricsSubject = "metrics"
|
||||
promTargetSubject = "promTarget"
|
||||
)
|
||||
|
||||
// Scheduler is struct to run scrape jobs.
|
||||
type Scheduler struct {
|
||||
Targets influxdb.ScraperTargetStoreService
|
||||
// Interval is between each metrics gathering event.
|
||||
Interval time.Duration
|
||||
// Timeout is the maximum time duration allowed by each TCP request
|
||||
Timeout time.Duration
|
||||
|
||||
// Publisher will send the gather requests and gathered metrics to the queue.
|
||||
Publisher nats.Publisher
|
||||
|
||||
log *zap.Logger
|
||||
|
||||
gather chan struct{}
|
||||
scrapeRequest chan *influxdb.ScraperTarget
|
||||
done chan struct{}
|
||||
wg sync.WaitGroup
|
||||
writer storage.PointsWriter
|
||||
}
|
||||
|
||||
// NewScheduler creates a new Scheduler and subscriptions for scraper jobs.
|
||||
func NewScheduler(
|
||||
log *zap.Logger,
|
||||
numScrapers int,
|
||||
scrapeQueueLength int,
|
||||
scrapesInProgress int,
|
||||
targets influxdb.ScraperTargetStoreService,
|
||||
p nats.Publisher,
|
||||
s nats.Subscriber,
|
||||
writer storage.PointsWriter,
|
||||
interval time.Duration,
|
||||
timeout time.Duration,
|
||||
) (*Scheduler, error) {
|
||||
if interval == 0 {
|
||||
interval = 60 * time.Second
|
||||
}
|
||||
if timeout == 0 {
|
||||
timeout = 30 * time.Second
|
||||
}
|
||||
scheduler := &Scheduler{
|
||||
Targets: targets,
|
||||
Interval: interval,
|
||||
Timeout: timeout,
|
||||
Publisher: p,
|
||||
log: log,
|
||||
gather: make(chan struct{}, 100),
|
||||
Targets: targets,
|
||||
Interval: interval,
|
||||
log: log,
|
||||
scrapeRequest: make(chan *influxdb.ScraperTarget, scrapeQueueLength),
|
||||
done: make(chan struct{}),
|
||||
|
||||
writer: writer,
|
||||
}
|
||||
|
||||
for i := 0; i < numScrapers; i++ {
|
||||
err := s.Subscribe(promTargetSubject, "metrics", &handler{
|
||||
Scraper: newPrometheusScraper(),
|
||||
Publisher: p,
|
||||
log: log,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scheduler.wg.Add(1)
|
||||
scraperPool := make(chan *prometheusScraper, scrapesInProgress)
|
||||
for i := 0; i < scrapesInProgress; i++ {
|
||||
scraperPool <- newPrometheusScraper()
|
||||
}
|
||||
go func() {
|
||||
defer scheduler.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case req := <-scheduler.scrapeRequest:
|
||||
select {
|
||||
// Each request much acquire a scraper from the (limited) pool to run the scrape,
|
||||
// then return it to the pool
|
||||
case scraper := <-scraperPool:
|
||||
scheduler.doScrape(scraper, req, func(s *prometheusScraper) {
|
||||
scraperPool <- s
|
||||
})
|
||||
case <-scheduler.done:
|
||||
return
|
||||
}
|
||||
case <-scheduler.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
scheduler.wg.Add(1)
|
||||
go func() {
|
||||
defer scheduler.wg.Done()
|
||||
ticker := time.NewTicker(scheduler.Interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-scheduler.done:
|
||||
return
|
||||
case <-ticker.C:
|
||||
scheduler.doGather()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return scheduler, nil
|
||||
}
|
||||
|
||||
// Run will retrieve scraper targets from the target storage,
|
||||
// and publish them to nats job queue for gather.
|
||||
func (s *Scheduler) Run(ctx context.Context) error {
|
||||
go func(s *Scheduler, ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-time.After(s.Interval): // TODO: change to ticker because of garbage collection
|
||||
s.gather <- struct{}{}
|
||||
}
|
||||
func (s *Scheduler) doScrape(scraper *prometheusScraper, req *influxdb.ScraperTarget, releaseScraper func(s *prometheusScraper)) {
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
defer releaseScraper(scraper)
|
||||
logger := s.log.With(zap.String("scraper-name", req.Name))
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
}(s, ctx)
|
||||
return s.run(ctx)
|
||||
ms, err := scraper.Gather(*req)
|
||||
if err != nil {
|
||||
logger.Error("Unable to gather", zap.Error(err))
|
||||
return
|
||||
}
|
||||
ps, err := ms.MetricsSlice.Points()
|
||||
if err != nil {
|
||||
logger.Error("Unable to gather list of points", zap.Error(err))
|
||||
}
|
||||
err = s.writer.WritePoints(context.Background(), ms.OrgID, ms.BucketID, ps)
|
||||
if err != nil {
|
||||
logger.Error("Unable to write gathered points", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *Scheduler) run(ctx context.Context) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-s.gather:
|
||||
s.doGather(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) doGather(ctx context.Context) {
|
||||
ctx, cancel := context.WithTimeout(ctx, s.Timeout)
|
||||
defer cancel()
|
||||
span, ctx := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
targets, err := s.Targets.ListTargets(ctx, influxdb.ScraperTargetFilter{})
|
||||
func (s *Scheduler) doGather() {
|
||||
targets, err := s.Targets.ListTargets(context.Background(), influxdb.ScraperTargetFilter{})
|
||||
if err != nil {
|
||||
s.log.Error("Cannot list targets", zap.Error(err))
|
||||
tracing.LogError(span, err)
|
||||
return
|
||||
}
|
||||
for _, target := range targets {
|
||||
if err := requestScrape(target, s.Publisher); err != nil {
|
||||
s.log.Error("JSON encoding error", zap.Error(err))
|
||||
tracing.LogError(span, err)
|
||||
select {
|
||||
case s.scrapeRequest <- &target:
|
||||
default:
|
||||
s.log.Warn("Skipping scrape due to scraper backlog", zap.String("target", target.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func requestScrape(t influxdb.ScraperTarget, publisher nats.Publisher) error {
|
||||
buf := new(bytes.Buffer)
|
||||
err := json.NewEncoder(buf).Encode(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch t.Type {
|
||||
case influxdb.PrometheusScraperType:
|
||||
return publisher.Publish(promTargetSubject, buf)
|
||||
}
|
||||
return fmt.Errorf("unsupported target scrape type: %s", t.Type)
|
||||
func (s *Scheduler) Close() {
|
||||
close(s.done)
|
||||
s.wg.Wait()
|
||||
}
|
||||
|
|
|
@ -6,17 +6,18 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kit/platform"
|
||||
"github.com/influxdata/influxdb/v2/mock"
|
||||
"github.com/influxdata/influxdb/v2/models"
|
||||
influxdbtesting "github.com/influxdata/influxdb/v2/testing"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
func TestScheduler(t *testing.T) {
|
||||
publisher, subscriber := mock.NewNats()
|
||||
totalGatherJobs := 3
|
||||
totalGatherJobs := 20
|
||||
|
||||
// Create top level logger
|
||||
logger := zaptest.NewLogger(t)
|
||||
|
@ -25,8 +26,7 @@ func TestScheduler(t *testing.T) {
|
|||
"/metrics": sampleRespSmall,
|
||||
},
|
||||
})
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
defer ts.Close()
|
||||
|
||||
storage := &mockStorage{
|
||||
Metrics: make(map[time.Time]Metrics),
|
||||
|
@ -39,56 +39,37 @@ func TestScheduler(t *testing.T) {
|
|||
BucketID: *bucketID,
|
||||
},
|
||||
},
|
||||
TotalGatherJobs: make(chan struct{}, totalGatherJobs),
|
||||
}
|
||||
|
||||
subscriber.Subscribe(MetricsSubject, "", &RecorderHandler{
|
||||
log: logger,
|
||||
Recorder: storage,
|
||||
})
|
||||
|
||||
scheduler, err := NewScheduler(logger, 10, storage, publisher, subscriber, time.Millisecond, time.Microsecond)
|
||||
|
||||
go func() {
|
||||
err = scheduler.run(ctx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
gatherJobs := make(chan []models.Point)
|
||||
done := make(chan struct{})
|
||||
writer := &mock.PointsWriter{}
|
||||
writer.WritePointsFn = func(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error {
|
||||
select {
|
||||
case gatherJobs <- points:
|
||||
case <-done:
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
go func(scheduler *Scheduler) {
|
||||
// let scheduler gather #{totalGatherJobs} metrics.
|
||||
for i := 0; i < totalGatherJobs; i++ {
|
||||
// make sure timestamp don't overwrite each other
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
scheduler.gather <- struct{}{}
|
||||
}
|
||||
}(scheduler)
|
||||
scheduler, err := NewScheduler(logger, 10, 2, storage, writer, 1*time.Millisecond)
|
||||
require.NoError(t, err)
|
||||
defer scheduler.Close()
|
||||
defer close(done) //don't block the points writer forever
|
||||
|
||||
// make sure all jobs are done
|
||||
pointWrites := [][]models.Point{}
|
||||
for i := 0; i < totalGatherJobs; i++ {
|
||||
<-storage.TotalGatherJobs
|
||||
newWrite := <-gatherJobs
|
||||
pointWrites = append(pointWrites, newWrite)
|
||||
assert.Equal(t, 1, len(newWrite))
|
||||
newWrite[0].SetTime(time.Unix(0, 0)) // zero out the time so we don't have to compare it
|
||||
assert.Equal(t, "go_goroutines gauge=36 0", newWrite[0].String())
|
||||
}
|
||||
|
||||
want := Metrics{
|
||||
Name: "go_goroutines",
|
||||
Type: dto.MetricType_GAUGE,
|
||||
Tags: map[string]string{},
|
||||
Fields: map[string]interface{}{
|
||||
"gauge": float64(36),
|
||||
},
|
||||
}
|
||||
|
||||
if len(storage.Metrics) < totalGatherJobs {
|
||||
if len(pointWrites) < totalGatherJobs {
|
||||
t.Fatalf("metrics stored less than expected, got len %d", len(storage.Metrics))
|
||||
}
|
||||
|
||||
for _, v := range storage.Metrics {
|
||||
if diff := cmp.Diff(v, want, metricsCmpOption); diff != "" {
|
||||
t.Fatalf("scraper parse metrics want %v, got %v", want, v)
|
||||
}
|
||||
}
|
||||
ts.Close()
|
||||
}
|
||||
|
||||
const sampleRespSmall = `
|
||||
|
|
|
@ -132,7 +132,7 @@ func TestPrometheusScraper(t *testing.T) {
|
|||
defer ts.Close()
|
||||
url = ts.URL
|
||||
}
|
||||
results, err := scraper.Gather(context.Background(), influxdb.ScraperTarget{
|
||||
results, err := scraper.Gather(influxdb.ScraperTarget{
|
||||
URL: url + "/metrics",
|
||||
OrgID: *orgID,
|
||||
BucketID: *bucketID,
|
||||
|
@ -196,19 +196,8 @@ type mockStorage struct {
|
|||
sync.RWMutex
|
||||
influxdb.UserResourceMappingService
|
||||
influxdb.OrganizationService
|
||||
TotalGatherJobs chan struct{}
|
||||
Metrics map[time.Time]Metrics
|
||||
Targets []influxdb.ScraperTarget
|
||||
}
|
||||
|
||||
func (s *mockStorage) Record(collected MetricsCollection) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
for _, m := range collected.MetricsSlice {
|
||||
s.Metrics[m.Timestamp] = m
|
||||
}
|
||||
s.TotalGatherJobs <- struct{}{}
|
||||
return nil
|
||||
Metrics map[time.Time]Metrics
|
||||
Targets []influxdb.ScraperTarget
|
||||
}
|
||||
|
||||
func (s *mockStorage) ListTargets(ctx context.Context, filter influxdb.ScraperTargetFilter) (targets []influxdb.ScraperTarget, err error) {
|
||||
|
|
14
go.mod
14
go.mod
|
@ -11,7 +11,6 @@ require (
|
|||
github.com/apache/arrow/go/arrow v0.0.0-20210722123801-4591d76fce28
|
||||
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3
|
||||
github.com/benbjohnson/tmpl v1.0.0
|
||||
github.com/boltdb/bolt v1.3.1 // indirect
|
||||
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e
|
||||
github.com/cespare/xxhash v1.1.0
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
|
@ -31,9 +30,7 @@ require (
|
|||
github.com/google/btree v1.0.1
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/google/go-jsonnet v0.17.0
|
||||
github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.6.4 // indirect
|
||||
github.com/hashicorp/raft v1.0.0 // indirect
|
||||
github.com/hashicorp/vault/api v1.0.2
|
||||
github.com/imdario/mergo v0.3.9 // indirect
|
||||
github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe
|
||||
|
@ -52,12 +49,6 @@ require (
|
|||
github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5
|
||||
github.com/mna/pigeon v1.0.1-0.20180808201053-bb0192cfc2ae
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect
|
||||
github.com/nats-io/gnatsd v1.3.0
|
||||
github.com/nats-io/go-nats v1.7.0 // indirect
|
||||
github.com/nats-io/go-nats-streaming v0.4.0
|
||||
github.com/nats-io/nats-streaming-server v0.11.2
|
||||
github.com/nats-io/nkeys v0.0.2 // indirect
|
||||
github.com/nats-io/nuid v1.0.0 // indirect
|
||||
github.com/onsi/ginkgo v1.11.0 // indirect
|
||||
github.com/onsi/gomega v1.8.1 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
|
@ -92,11 +83,9 @@ require (
|
|||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
|
||||
golang.org/x/tools v0.1.4
|
||||
google.golang.org/protobuf v1.27.1
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.3.0
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
|
||||
honnef.co/go/tools v0.2.0
|
||||
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
|
||||
)
|
||||
|
||||
require github.com/influxdata/influx-cli/v2 v2.2.1-0.20211129214229-4c0fae3a4c0d
|
||||
|
@ -122,7 +111,6 @@ require (
|
|||
github.com/Microsoft/go-winio v0.4.11 // indirect
|
||||
github.com/SAP/go-hdb v0.14.1 // indirect
|
||||
github.com/aokoli/goutils v1.0.1 // indirect
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
|
||||
github.com/aws/aws-sdk-go v1.29.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.3.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.1.5 // indirect
|
||||
|
@ -159,11 +147,9 @@ require (
|
|||
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.0.0 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.1 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/vault/sdk v0.1.8 // indirect
|
||||
github.com/huandu/xstrings v1.0.0 // indirect
|
||||
|
|
33
go.sum
33
go.sum
|
@ -109,7 +109,6 @@ github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QN
|
|||
github.com/apache/arrow/go/arrow v0.0.0-20210722123801-4591d76fce28 h1:6ZRbTsAQWpML1HK8xOpZEAH9JQ/0X6VcjUjmovKcOQA=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20210722123801-4591d76fce28/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aws/aws-sdk-go v1.29.16 h1:Gbtod7Y4W/Ai7wPtesdvgGVTkFN8JxAaGouRLlcQfQs=
|
||||
|
@ -149,8 +148,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
|||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM=
|
||||
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
|
||||
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=
|
||||
|
@ -380,10 +377,7 @@ github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9
|
|||
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c h1:BTAbnbegUIMB6xmQCwWE8yRzbA4XSpnZY5hvRJC188I=
|
||||
github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
|
@ -395,16 +389,12 @@ github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa
|
|||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y=
|
||||
github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
|
||||
github.com/hashicorp/vault/api v1.0.2 h1:/V9fULvLwt58vme/6Rkt/p/GtlresQv+Z9E6dgdANhs=
|
||||
github.com/hashicorp/vault/api v1.0.2/go.mod h1:AV/+M5VPDpB90arloVX0rVDUIHkONiwz5Uza9HRtpUE=
|
||||
github.com/hashicorp/vault/sdk v0.1.8 h1:pfF3KwA1yPlfpmcumNsFM4uo91WMasX5gTuIkItu9r0=
|
||||
|
@ -426,8 +416,6 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
|
|||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe h1:7j4SdN/BvQwN6WoUq7mv0kg5U9NhnFBxPGMafYRKym0=
|
||||
github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
|
||||
github.com/influxdata/flux v0.150.0 h1:kOcqbXZO6XuSV08KPceuMJHrU6M1TH9MNFONkGsYaaw=
|
||||
github.com/influxdata/flux v0.150.0/go.mod h1:qw7WkFloKAt9dEh0QzqP9avTzz0Nb4iBFHnmj5yYJ8o=
|
||||
github.com/influxdata/flux v0.152.0 h1:MimFI4efC+6Zd+zFvx3A5/cWf+NFhAWlqDMWVBPOCME=
|
||||
github.com/influxdata/flux v0.152.0/go.mod h1:qjuLZJvOoMUBcubg+qNrc0pLbG55iRCVNokwq/8q7is=
|
||||
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
|
||||
|
@ -440,10 +428,7 @@ github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256 h1:8io3jjCJ0
|
|||
github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
github.com/influxdata/nats-streaming-server v0.11.3-0.20201112040610-c277f7560803 h1:LpaVAM5Www2R7M0GJAxAdL3swBvmna8Pyzw6F7o+j04=
|
||||
github.com/influxdata/nats-streaming-server v0.11.3-0.20201112040610-c277f7560803/go.mod h1:qgAMR6M9EokX+R5X7jUQfubwBdS1tBIl4yVJ3shhcWk=
|
||||
github.com/influxdata/pkg-config v0.2.10 h1:JRG4QZZc+0VLPA48af/BSDXxu5Xriwa/DfzUCk8+3FA=
|
||||
github.com/influxdata/pkg-config v0.2.10/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
|
||||
github.com/influxdata/pkg-config v0.2.11 h1:RDlWAvkTARzPRGChq34x179TYlRndq8OU5Ro80E9g3Q=
|
||||
github.com/influxdata/pkg-config v0.2.11/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
|
||||
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
|
||||
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68ZBRvtCjBi3QSosCIKrjmMbYlQMFAwVLds4=
|
||||
|
@ -555,16 +540,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
|||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/gnatsd v1.3.0 h1:+5d80klu3QaJgNbdavVBjWJP7cHd11U2CLnRTFM9ICI=
|
||||
github.com/nats-io/gnatsd v1.3.0/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ=
|
||||
github.com/nats-io/go-nats v1.7.0 h1:oQOfHcLr8hb43QG8yeVyY2jtarIaTjOv41CGdF3tTvQ=
|
||||
github.com/nats-io/go-nats v1.7.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=
|
||||
github.com/nats-io/go-nats-streaming v0.4.0 h1:00wOBnTKzZGvQOFRSxj18kUm4X2TvXzv8LS0skZegPc=
|
||||
github.com/nats-io/go-nats-streaming v0.4.0/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
||||
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
|
||||
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||
github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs=
|
||||
github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
|
@ -580,7 +555,6 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ
|
|||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
|
@ -756,7 +730,6 @@ go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
|
|||
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
@ -1182,8 +1155,6 @@ gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4
|
|||
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 h1:kb0VV7NuIojvRfzwslQeP3yArBqJHW9tOl4t38VS1jM=
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -1206,8 +1177,6 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
|
|||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE=
|
||||
honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
|
||||
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 h1:L0cnkNl4TfAXzvdrqsYEmxOHOCv2p5I3taaReO8BWFs=
|
||||
labix.org/v2/mgo v0.0.0-20140701140051-000000000287/go.mod h1:Lg7AYkt1uXJoR9oeSZ3W/8IXLdvOfIITgZnommstyz4=
|
||||
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
|
|
115
mock/nats.go
115
mock/nats.go
|
@ -1,115 +0,0 @@
|
|||
package mock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
)
|
||||
|
||||
// NatsServer is the mocked nats server based buffered channel.
|
||||
type NatsServer struct {
|
||||
sync.RWMutex
|
||||
queue map[string]chan io.Reader
|
||||
}
|
||||
|
||||
// create an empty channel for a subject
|
||||
func (s *NatsServer) initSubject(subject string) (chan io.Reader, error) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if _, ok := s.queue[subject]; !ok {
|
||||
s.queue[subject] = make(chan io.Reader)
|
||||
}
|
||||
return s.queue[subject], nil
|
||||
}
|
||||
|
||||
// NewNats returns a mocked version of publisher, subscriber
|
||||
func NewNats() (nats.Publisher, nats.Subscriber) {
|
||||
server := &NatsServer{
|
||||
queue: make(map[string]chan io.Reader),
|
||||
}
|
||||
|
||||
publisher := &NatsPublisher{
|
||||
server: server,
|
||||
}
|
||||
|
||||
subscriber := &NatsSubscriber{
|
||||
server: server,
|
||||
}
|
||||
|
||||
return publisher, subscriber
|
||||
}
|
||||
|
||||
// NatsPublisher is a mocked nats publisher.
|
||||
type NatsPublisher struct {
|
||||
server *NatsServer
|
||||
}
|
||||
|
||||
// Publish add subject and msg to server.
|
||||
func (p *NatsPublisher) Publish(subject string, r io.Reader) error {
|
||||
_, err := p.server.initSubject(subject)
|
||||
p.server.queue[subject] <- r
|
||||
return err
|
||||
}
|
||||
|
||||
// NatsSubscriber is mocked nats subscriber.
|
||||
type NatsSubscriber struct {
|
||||
server *NatsServer
|
||||
}
|
||||
|
||||
// Subscribe implements nats.Subscriber interface.
|
||||
func (s *NatsSubscriber) Subscribe(subject, group string, handler nats.Handler) error {
|
||||
ch, err := s.server.initSubject(subject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func(s *NatsSubscriber, subject string, handler nats.Handler) {
|
||||
for r := range ch {
|
||||
handler.Process(&natsSubscription{subject: subject},
|
||||
&natsMessage{
|
||||
r: r,
|
||||
},
|
||||
)
|
||||
}
|
||||
}(s, subject, handler)
|
||||
return nil
|
||||
}
|
||||
|
||||
type natsMessage struct {
|
||||
r io.Reader
|
||||
read bool
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
func (m *natsMessage) Data() []byte {
|
||||
if m.read {
|
||||
return m.bytes
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(m.r)
|
||||
m.bytes = buf.Bytes()
|
||||
m.read = true
|
||||
return m.bytes
|
||||
}
|
||||
|
||||
func (m *natsMessage) Ack() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type natsSubscription struct {
|
||||
subject string
|
||||
}
|
||||
|
||||
func (s *natsSubscription) Pending() (int64, int64, error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
func (s *natsSubscription) Delivered() (int64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (s *natsSubscription) Close() error {
|
||||
return nil
|
||||
}
|
|
@ -1,62 +0,0 @@
|
|||
package mock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/nats"
|
||||
)
|
||||
|
||||
// TestNats use the mocked nats publisher and subscriber
|
||||
// try to collect 0~total integers
|
||||
func TestNats(t *testing.T) {
|
||||
total := 30
|
||||
workers := 3
|
||||
publisher, subscriber := NewNats()
|
||||
subject := "default"
|
||||
h := &fakeNatsHandler{
|
||||
collector: make([]int, 0),
|
||||
totalJobs: make(chan struct{}, total),
|
||||
}
|
||||
for i := 0; i < workers; i++ {
|
||||
subscriber.Subscribe(subject, "", h)
|
||||
}
|
||||
|
||||
for i := 0; i < total; i++ {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.Write([]byte{uint8(i)})
|
||||
go publisher.Publish(subject, buf)
|
||||
}
|
||||
|
||||
// make sure all done
|
||||
for i := 0; i < total; i++ {
|
||||
<-h.totalJobs
|
||||
}
|
||||
|
||||
sort.Slice(h.collector, func(i, j int) bool {
|
||||
return h.collector[i] < h.collector[j]
|
||||
})
|
||||
|
||||
for i := 0; i < total; i++ {
|
||||
if h.collector[i] != i {
|
||||
t.Fatalf("nats mocking got incorrect result, want %d, got %d", i, h.collector[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fakeNatsHandler struct {
|
||||
sync.Mutex
|
||||
collector []int
|
||||
totalJobs chan struct{}
|
||||
}
|
||||
|
||||
func (h *fakeNatsHandler) Process(s nats.Subscription, m nats.Message) {
|
||||
h.Lock()
|
||||
defer h.Unlock()
|
||||
defer m.Ack()
|
||||
i := m.Data()
|
||||
h.collector = append(h.collector, int(i[0]))
|
||||
h.totalJobs <- struct{}{}
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
package nats
|
||||
|
||||
import "go.uber.org/zap"
|
||||
|
||||
type Handler interface {
|
||||
// Process does something with a received subscription message, then acks it.
|
||||
Process(s Subscription, m Message)
|
||||
}
|
||||
|
||||
type LogHandler struct {
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
func (lh *LogHandler) Process(s Subscription, m Message) {
|
||||
lh.log.Info(string(m.Data()))
|
||||
m.Ack()
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package nats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
natsserver "github.com/nats-io/gnatsd/server"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var _ natsserver.Logger = (*zapLoggerAdapter)(nil)
|
||||
|
||||
// zapLogger
|
||||
type zapLoggerAdapter struct {
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
func (z *zapLoggerAdapter) Noticef(format string, v ...interface{}) {
|
||||
z.log.Debug(fmt.Sprintf(format, v...), zap.String("nats_level", "notice"))
|
||||
}
|
||||
|
||||
func (z *zapLoggerAdapter) Debugf(format string, v ...interface{}) {
|
||||
z.log.Debug(fmt.Sprintf(format, v...), zap.String("nats_level", "debug"))
|
||||
}
|
||||
|
||||
func (z *zapLoggerAdapter) Tracef(format string, v ...interface{}) {
|
||||
z.log.Debug(fmt.Sprintf(format, v...), zap.String("nats_level", "trace"))
|
||||
}
|
||||
|
||||
func (z *zapLoggerAdapter) Fatalf(format string, v ...interface{}) {
|
||||
z.log.Fatal(fmt.Sprintf(format, v...), zap.String("nats_level", "fatal"))
|
||||
}
|
||||
|
||||
func (z *zapLoggerAdapter) Errorf(format string, v ...interface{}) {
|
||||
z.log.Error(fmt.Sprintf(format, v...), zap.String("nats_level", "error"))
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
package nats
|
||||
|
||||
import (
|
||||
stan "github.com/nats-io/go-nats-streaming"
|
||||
)
|
||||
|
||||
type Message interface {
|
||||
Data() []byte
|
||||
Ack() error
|
||||
}
|
||||
|
||||
type message struct {
|
||||
m *stan.Msg
|
||||
}
|
||||
|
||||
func (m *message) Data() []byte {
|
||||
return m.m.Data
|
||||
}
|
||||
|
||||
func (m *message) Ack() error {
|
||||
return m.m.Ack()
|
||||
}
|
|
@ -1,59 +0,0 @@
|
|||
package nats
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
stan "github.com/nats-io/go-nats-streaming"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Publisher interface {
|
||||
// Publish a new message to channel
|
||||
Publish(subject string, r io.Reader) error
|
||||
}
|
||||
|
||||
type AsyncPublisher struct {
|
||||
ClientID string
|
||||
Connection stan.Conn
|
||||
log *zap.Logger
|
||||
Addr string
|
||||
}
|
||||
|
||||
func NewAsyncPublisher(log *zap.Logger, clientID string, addr string) *AsyncPublisher {
|
||||
return &AsyncPublisher{
|
||||
ClientID: clientID,
|
||||
log: log,
|
||||
Addr: addr,
|
||||
}
|
||||
}
|
||||
|
||||
// Open creates and maintains a connection to NATS server
|
||||
func (p *AsyncPublisher) Open() error {
|
||||
sc, err := stan.Connect(ServerName, p.ClientID, stan.NatsURL(p.Addr))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Connection = sc
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AsyncPublisher) Publish(subject string, r io.Reader) error {
|
||||
if p.Connection == nil {
|
||||
return ErrNoNatsConnection
|
||||
}
|
||||
|
||||
ah := func(guid string, err error) {
|
||||
if err != nil {
|
||||
p.log.Info(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = p.Connection.PublishAsync(subject, data, ah)
|
||||
return err
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
package nats
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
sserver "github.com/nats-io/nats-streaming-server/server"
|
||||
"github.com/nats-io/nats-streaming-server/stores"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const ServerName = "platform"
|
||||
|
||||
const (
|
||||
// RandomPort is the value for port that, when supplied, will cause the
|
||||
// server to listen on a randomly-chosen available port. The resolved port
|
||||
// will be reassigned to the Port field of server.Options.
|
||||
RandomPort = server.RANDOM_PORT
|
||||
)
|
||||
|
||||
var ErrNoNatsConnection = errors.New("nats connection has not been established. Call Open() first")
|
||||
|
||||
// Server wraps a connection to a NATS streaming server
|
||||
type Server struct {
|
||||
serverOpts *server.Options
|
||||
Server *sserver.StanServer
|
||||
logger server.Logger
|
||||
}
|
||||
|
||||
// Open starts a NATS streaming server
|
||||
func (s *Server) Open() error {
|
||||
// Streaming options
|
||||
opts := sserver.GetDefaultOptions()
|
||||
opts.StoreType = stores.TypeMemory
|
||||
opts.ID = ServerName
|
||||
opts.CustomLogger = s.logger
|
||||
|
||||
server, err := sserver.RunServerWithOpts(opts, s.serverOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Server = server
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close stops the embedded NATS server.
|
||||
func (s *Server) Close() {
|
||||
s.Server.Shutdown()
|
||||
}
|
||||
|
||||
// NewDefaultServerOptions returns the default NATS server options, allowing the
|
||||
// caller to override specific fields.
|
||||
func NewDefaultServerOptions() server.Options {
|
||||
return sserver.DefaultNatsServerOptions
|
||||
}
|
||||
|
||||
// NewServer creates a new streaming server with the provided server options.
|
||||
func NewServer(opts *server.Options, log *zap.Logger) *Server {
|
||||
if opts == nil {
|
||||
o := NewDefaultServerOptions()
|
||||
opts = &o
|
||||
}
|
||||
return &Server{serverOpts: opts, logger: &zapLoggerAdapter{log}}
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
package nats
|
||||
|
||||
import (
|
||||
stan "github.com/nats-io/go-nats-streaming"
|
||||
)
|
||||
|
||||
type Subscriber interface {
|
||||
// Subscribe listens to a channel, handling messages with Handler
|
||||
Subscribe(subject, group string, handler Handler) error
|
||||
}
|
||||
|
||||
type QueueSubscriber struct {
|
||||
ClientID string
|
||||
Connection stan.Conn
|
||||
Addr string
|
||||
}
|
||||
|
||||
func NewQueueSubscriber(clientID string, addr string) *QueueSubscriber {
|
||||
return &QueueSubscriber{ClientID: clientID, Addr: addr}
|
||||
}
|
||||
|
||||
// Open creates and maintains a connection to NATS server
|
||||
func (s *QueueSubscriber) Open() error {
|
||||
sc, err := stan.Connect(ServerName, s.ClientID, stan.NatsURL(s.Addr))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Connection = sc
|
||||
return nil
|
||||
}
|
||||
|
||||
type messageHandler struct {
|
||||
handler Handler
|
||||
sub subscription
|
||||
}
|
||||
|
||||
func (mh *messageHandler) handle(m *stan.Msg) {
|
||||
mh.handler.Process(mh.sub, &message{m: m})
|
||||
}
|
||||
|
||||
func (s *QueueSubscriber) Subscribe(subject, group string, handler Handler) error {
|
||||
if s.Connection == nil {
|
||||
return ErrNoNatsConnection
|
||||
}
|
||||
|
||||
mh := messageHandler{handler: handler}
|
||||
sub, err := s.Connection.QueueSubscribe(subject, group, mh.handle, stan.DurableName(group), stan.SetManualAckMode(), stan.MaxInflight(25))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mh.sub = subscription{sub: sub}
|
||||
return nil
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
package nats
|
||||
|
||||
import stan "github.com/nats-io/go-nats-streaming"
|
||||
|
||||
type Subscription interface {
|
||||
// Pending returns the number of queued messages and queued bytes for this subscription.
|
||||
Pending() (int64, int64, error)
|
||||
|
||||
// Delivered returns the number of delivered messages for this subscription.
|
||||
Delivered() (int64, error)
|
||||
|
||||
// Close removes this subscriber
|
||||
Close() error
|
||||
}
|
||||
|
||||
type subscription struct {
|
||||
sub stan.Subscription
|
||||
}
|
||||
|
||||
func (s subscription) Pending() (int64, int64, error) {
|
||||
messages, bytes, err := s.sub.Pending()
|
||||
return int64(messages), int64(bytes), err
|
||||
}
|
||||
|
||||
func (s subscription) Delivered() (int64, error) {
|
||||
return s.sub.Delivered()
|
||||
}
|
||||
|
||||
func (s subscription) Close() error {
|
||||
return s.sub.Close()
|
||||
}
|
|
@ -24,8 +24,11 @@ func TestWriter_Limited(t *testing.T) {
|
|||
}
|
||||
|
||||
rate := float64(n) / elapsed.Seconds()
|
||||
if rate > float64(limit) {
|
||||
t.Errorf("rate limit mismath: exp %f, got %f", float64(limit), rate)
|
||||
// 1% tolerance - we have seen the limit be slightly off on Windows systems, likely due to
|
||||
// rounding of time intervals.
|
||||
tolerance := 1.01
|
||||
if rate > (float64(limit) * tolerance) {
|
||||
t.Errorf("rate limit mismatch: exp %f, got %f", float64(limit), rate)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,7 +46,7 @@ func TestWriter_Limiter_ExceedBurst(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
if n != len(twentyOneBytes) {
|
||||
t.Errorf("exected %d bytes written, but got %d", len(twentyOneBytes), n)
|
||||
t.Errorf("expected %d bytes written, but got %d", len(twentyOneBytes), n)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -46,7 +46,9 @@ func TestCreateNewQueueDirExists(t *testing.T) {
|
|||
func TestEnqueueScan(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
data := "weather,location=us-midwest temperature=82 1465839830100400200"
|
||||
data1 := "weather,location=us-midwest temperature=82 1465839830100400200"
|
||||
data2 := "weather,location=us-midwest temperature=84 1465839830100400201"
|
||||
data3 := "weather,location=us-midwest temperature=86 1465839830100400202"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@ -55,28 +57,31 @@ func TestEnqueueScan(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "single point with successful write",
|
||||
testData: []string{data},
|
||||
testData: []string{data1},
|
||||
writeFuncReturn: nil,
|
||||
},
|
||||
{
|
||||
name: "multiple points with successful write",
|
||||
testData: []string{data, data, data},
|
||||
testData: []string{data1, data2, data3},
|
||||
writeFuncReturn: nil,
|
||||
},
|
||||
{
|
||||
name: "single point with unsuccessful write",
|
||||
testData: []string{data},
|
||||
testData: []string{data1},
|
||||
writeFuncReturn: errors.New("some error"),
|
||||
},
|
||||
{
|
||||
name: "multiple points with unsuccessful write",
|
||||
testData: []string{data, data, data},
|
||||
testData: []string{data1, data2, data3},
|
||||
writeFuncReturn: errors.New("some error"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.name == "multiple points with unsuccessful write" {
|
||||
t.Skip("Fix this test when https://github.com/influxdata/influxdb/issues/23109 is fixed")
|
||||
}
|
||||
queuePath, qm := initQueueManager(t)
|
||||
defer os.RemoveAll(filepath.Dir(queuePath))
|
||||
|
||||
|
@ -85,7 +90,7 @@ func TestEnqueueScan(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
rq := qm.replicationQueues[id1]
|
||||
var writeCounter sync.WaitGroup
|
||||
rq.remoteWriter = getTestRemoteWriter(t, data, tt.writeFuncReturn, &writeCounter)
|
||||
rq.remoteWriter = getTestRemoteWriterSequenced(t, tt.testData, tt.writeFuncReturn, &writeCounter)
|
||||
|
||||
// Enqueue the data
|
||||
for _, dat := range tt.testData {
|
||||
|
@ -357,11 +362,16 @@ func (tw *testRemoteWriter) Write(data []byte) error {
|
|||
return tw.writeFn(data)
|
||||
}
|
||||
|
||||
func getTestRemoteWriter(t *testing.T, expected string, returning error, wg *sync.WaitGroup) remoteWriter {
|
||||
func getTestRemoteWriterSequenced(t *testing.T, expected []string, returning error, wg *sync.WaitGroup) remoteWriter {
|
||||
t.Helper()
|
||||
|
||||
count := 0
|
||||
writeFn := func(b []byte) error {
|
||||
require.Equal(t, expected, string(b))
|
||||
if count >= len(expected) {
|
||||
t.Fatalf("count larger than expected len, %d > %d", count, len(expected))
|
||||
}
|
||||
require.Equal(t, expected[count], string(b))
|
||||
count++
|
||||
if wg != nil {
|
||||
wg.Done()
|
||||
}
|
||||
|
@ -375,6 +385,19 @@ func getTestRemoteWriter(t *testing.T, expected string, returning error, wg *syn
|
|||
return writer
|
||||
}
|
||||
|
||||
func getTestRemoteWriter(t *testing.T, expected string) remoteWriter {
|
||||
t.Helper()
|
||||
|
||||
writer := &testRemoteWriter{
|
||||
writeFn: func(b []byte) error {
|
||||
require.Equal(t, expected, string(b))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return writer
|
||||
}
|
||||
|
||||
func TestEnqueueData(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -431,7 +454,7 @@ func TestEnqueueData_WithMetrics(t *testing.T) {
|
|||
data := "some fake data"
|
||||
numPointsPerData := 3
|
||||
numDataToAdd := 4
|
||||
rq.remoteWriter = getTestRemoteWriter(t, data, nil, nil)
|
||||
rq.remoteWriter = getTestRemoteWriter(t, data)
|
||||
|
||||
for i := 1; i <= numDataToAdd; i++ {
|
||||
go func() { <-rq.receive }() // absorb the receive to avoid testcase deadlock
|
||||
|
|
|
@ -2,7 +2,6 @@ package remotewrite
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
|
@ -67,7 +66,6 @@ type writer struct {
|
|||
maximumAttemptsForBackoffTime int
|
||||
clientTimeout time.Duration
|
||||
done chan struct{}
|
||||
maximumAttemptsBeforeErr int // used for testing, 0 for unlimited
|
||||
waitFunc waitFunc // used for testing
|
||||
}
|
||||
|
||||
|
@ -110,9 +108,6 @@ func (w *writer) Write(data []byte) error {
|
|||
attempts := 0
|
||||
|
||||
for {
|
||||
if w.maximumAttemptsBeforeErr > 0 && attempts >= w.maximumAttemptsBeforeErr {
|
||||
return errors.New("maximum number of attempts exceeded")
|
||||
}
|
||||
|
||||
// Get the most recent config on every attempt, in case the user has updated the config to correct errors.
|
||||
conf, err := w.configStore.GetFullHTTPConfig(ctx, w.replicationID)
|
||||
|
|
|
@ -28,19 +28,28 @@ var (
|
|||
testID = platform.ID(1)
|
||||
)
|
||||
|
||||
func testWriter(t *testing.T) (*writer, *replicationsMock.MockHttpConfigStore) {
|
||||
func testWriter(t *testing.T) (*writer, *replicationsMock.MockHttpConfigStore, chan struct{}) {
|
||||
ctrl := gomock.NewController(t)
|
||||
configStore := replicationsMock.NewMockHttpConfigStore(ctrl)
|
||||
w := NewWriter(testID, configStore, metrics.NewReplicationsMetrics(), zaptest.NewLogger(t), make(chan struct{}))
|
||||
return w, configStore
|
||||
done := make(chan struct{})
|
||||
w := NewWriter(testID, configStore, metrics.NewReplicationsMetrics(), zaptest.NewLogger(t), done)
|
||||
return w, configStore, done
|
||||
}
|
||||
|
||||
func testServer(t *testing.T, status int, wantData []byte) *httptest.Server {
|
||||
func constantStatus(i int) func(int) int {
|
||||
return func(int) int {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
func testServer(t *testing.T, statusForCount func(int) int, wantData []byte) *httptest.Server {
|
||||
count := 0
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gotData, err := ioutil.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wantData, gotData)
|
||||
w.WriteHeader(status)
|
||||
w.WriteHeader(statusForCount(count))
|
||||
count++
|
||||
}))
|
||||
}
|
||||
|
||||
|
@ -60,7 +69,7 @@ func TestWrite(t *testing.T) {
|
|||
t.Run("error getting config", func(t *testing.T) {
|
||||
wantErr := errors.New("uh oh")
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(nil, wantErr)
|
||||
require.Equal(t, wantErr, w.Write([]byte{}))
|
||||
|
@ -71,21 +80,21 @@ func TestWrite(t *testing.T) {
|
|||
RemoteURL: "not a good URL",
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil)
|
||||
require.Error(t, w.Write([]byte{}))
|
||||
})
|
||||
|
||||
t.Run("immediate good response", func(t *testing.T) {
|
||||
svr := testServer(t, http.StatusNoContent, testData)
|
||||
svr := testServer(t, constantStatus(http.StatusNoContent), testData)
|
||||
defer svr.Close()
|
||||
|
||||
testConfig := &influxdb.ReplicationHTTPConfig{
|
||||
RemoteURL: svr.URL,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(nil)
|
||||
|
@ -95,74 +104,44 @@ func TestWrite(t *testing.T) {
|
|||
t.Run("error updating response info", func(t *testing.T) {
|
||||
wantErr := errors.New("o no")
|
||||
|
||||
svr := testServer(t, http.StatusNoContent, testData)
|
||||
svr := testServer(t, constantStatus(http.StatusNoContent), testData)
|
||||
defer svr.Close()
|
||||
|
||||
testConfig := &influxdb.ReplicationHTTPConfig{
|
||||
RemoteURL: svr.URL,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(wantErr)
|
||||
require.Equal(t, wantErr, w.Write(testData))
|
||||
})
|
||||
|
||||
t.Run("bad server responses that never succeed", func(t *testing.T) {
|
||||
testAttempts := 3
|
||||
|
||||
for _, status := range []int{http.StatusOK, http.StatusTeapot, http.StatusInternalServerError} {
|
||||
t.Run(fmt.Sprintf("status code %d", status), func(t *testing.T) {
|
||||
svr := testServer(t, status, testData)
|
||||
defer svr.Close()
|
||||
|
||||
testConfig := &influxdb.ReplicationHTTPConfig{
|
||||
RemoteURL: svr.URL,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w.waitFunc = instaWait()
|
||||
w.maximumAttemptsBeforeErr = testAttempts
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).Times(testAttempts)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, status, invalidResponseCode(status).Error()).Return(nil).Times(testAttempts)
|
||||
require.Equal(t, errors.New("maximum number of attempts exceeded"), w.Write(testData))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("bad server responses at first followed by good server responses", func(t *testing.T) {
|
||||
testAttempts := 10
|
||||
attemptsBeforeSuccess := 3
|
||||
serverCounter := 0
|
||||
badStatus := http.StatusInternalServerError
|
||||
goodStatus := http.StatusNoContent
|
||||
|
||||
svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
serverCounter++
|
||||
gotData, err := ioutil.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, testData, gotData)
|
||||
if serverCounter >= attemptsBeforeSuccess {
|
||||
w.WriteHeader(goodStatus)
|
||||
return
|
||||
status := func(count int) int {
|
||||
if count >= attemptsBeforeSuccess {
|
||||
return goodStatus
|
||||
}
|
||||
return badStatus
|
||||
}
|
||||
|
||||
w.WriteHeader(badStatus)
|
||||
}))
|
||||
svr := testServer(t, status, testData)
|
||||
defer svr.Close()
|
||||
|
||||
testConfig := &influxdb.ReplicationHTTPConfig{
|
||||
RemoteURL: svr.URL,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
w.waitFunc = instaWait()
|
||||
w.maximumAttemptsBeforeErr = testAttempts
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).Times(attemptsBeforeSuccess)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, badStatus, invalidResponseCode(badStatus).Error()).Return(nil).Times(attemptsBeforeSuccess - 1)
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).Times(attemptsBeforeSuccess + 1)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, badStatus, invalidResponseCode(badStatus).Error()).Return(nil).Times(attemptsBeforeSuccess)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, goodStatus, "").Return(nil)
|
||||
require.NoError(t, w.Write(testData))
|
||||
})
|
||||
|
@ -170,7 +149,7 @@ func TestWrite(t *testing.T) {
|
|||
t.Run("drops bad data after config is updated", func(t *testing.T) {
|
||||
testAttempts := 5
|
||||
|
||||
svr := testServer(t, http.StatusBadRequest, testData)
|
||||
svr := testServer(t, constantStatus(http.StatusBadRequest), testData)
|
||||
defer svr.Close()
|
||||
|
||||
testConfig := &influxdb.ReplicationHTTPConfig{
|
||||
|
@ -182,9 +161,8 @@ func TestWrite(t *testing.T) {
|
|||
DropNonRetryableData: true,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
w.waitFunc = instaWait()
|
||||
w.maximumAttemptsBeforeErr = testAttempts
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).Times(testAttempts - 1)
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(updatedConfig, nil)
|
||||
|
@ -193,7 +171,6 @@ func TestWrite(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("uses wait time from response header if present", func(t *testing.T) {
|
||||
testAttempts := 3
|
||||
numSeconds := 5
|
||||
waitTimeFromHeader := 5 * time.Second
|
||||
|
||||
|
@ -210,79 +187,75 @@ func TestWrite(t *testing.T) {
|
|||
RemoteURL: svr.URL,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, done := testWriter(t)
|
||||
w.waitFunc = func(dur time.Duration) <-chan time.Time {
|
||||
require.Equal(t, waitTimeFromHeader, dur)
|
||||
out := make(chan time.Time)
|
||||
close(out)
|
||||
return out
|
||||
close(done)
|
||||
return instaWait()(dur)
|
||||
}
|
||||
w.maximumAttemptsBeforeErr = testAttempts
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).Times(testAttempts)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusTooManyRequests, invalidResponseCode(http.StatusTooManyRequests).Error()).Return(nil).Times(testAttempts)
|
||||
require.Equal(t, errors.New("maximum number of attempts exceeded"), w.Write(testData))
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).MinTimes(1)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusTooManyRequests, invalidResponseCode(http.StatusTooManyRequests).Error()).Return(nil).MinTimes(1)
|
||||
err := w.Write(testData)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
})
|
||||
|
||||
t.Run("can cancel with done channel", func(t *testing.T) {
|
||||
svr := testServer(t, http.StatusInternalServerError, testData)
|
||||
svr := testServer(t, constantStatus(http.StatusInternalServerError), testData)
|
||||
defer svr.Close()
|
||||
|
||||
testConfig := &influxdb.ReplicationHTTPConfig{
|
||||
RemoteURL: svr.URL,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
done := make(chan struct{})
|
||||
w.done = done
|
||||
w.waitFunc = func(dur time.Duration) <-chan time.Time {
|
||||
close(done)
|
||||
return time.After(time.Second)
|
||||
}
|
||||
w, configStore, done := testWriter(t)
|
||||
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusInternalServerError, invalidResponseCode(http.StatusInternalServerError).Error()).Return(nil)
|
||||
configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).MinTimes(1)
|
||||
configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusInternalServerError, invalidResponseCode(http.StatusInternalServerError).Error()).
|
||||
DoAndReturn(func(_, _, _, _ interface{}) error {
|
||||
close(done)
|
||||
return nil
|
||||
})
|
||||
require.Equal(t, context.Canceled, w.Write(testData))
|
||||
})
|
||||
}
|
||||
|
||||
func TestWrite_Metrics(t *testing.T) {
|
||||
maximumAttemptsBeforeErr := 5
|
||||
testData := []byte("this is some data")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
status int
|
||||
status func(int) int
|
||||
data []byte
|
||||
wantWriteErr error
|
||||
registerExpectations func(*testing.T, *replicationsMock.MockHttpConfigStore, *influxdb.ReplicationHTTPConfig)
|
||||
checkMetrics func(*testing.T, *prom.Registry)
|
||||
}{
|
||||
{
|
||||
name: "server errors",
|
||||
status: http.StatusTeapot,
|
||||
data: []byte{},
|
||||
wantWriteErr: errors.New("maximum number of attempts exceeded"),
|
||||
name: "server errors",
|
||||
status: func(i int) int {
|
||||
arr := []int{http.StatusTeapot, http.StatusTeapot, http.StatusTeapot, http.StatusNoContent}
|
||||
return arr[i]
|
||||
},
|
||||
data: []byte{},
|
||||
registerExpectations: func(t *testing.T, store *replicationsMock.MockHttpConfigStore, conf *influxdb.ReplicationHTTPConfig) {
|
||||
store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil).Times(5)
|
||||
store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusTeapot, invalidResponseCode(http.StatusTeapot).Error()).Return(nil).Times(5)
|
||||
store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil).Times(4)
|
||||
store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusTeapot, invalidResponseCode(http.StatusTeapot).Error()).Return(nil).Times(3)
|
||||
store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(nil).Times(1)
|
||||
},
|
||||
checkMetrics: func(t *testing.T, reg *prom.Registry) {
|
||||
mfs := promtest.MustGather(t, reg)
|
||||
|
||||
errorCodes := promtest.FindMetric(mfs, "replications_queue_remote_write_errors", map[string]string{
|
||||
"replicationID": testID.String(),
|
||||
"code": strconv.Itoa(http.StatusTeapot),
|
||||
})
|
||||
require.NotNil(t, errorCodes)
|
||||
require.Equal(t, float64(maximumAttemptsBeforeErr), errorCodes.Counter.GetValue())
|
||||
require.Equal(t, 3.0, errorCodes.Counter.GetValue())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful write",
|
||||
status: http.StatusNoContent,
|
||||
data: testData,
|
||||
wantWriteErr: nil,
|
||||
name: "successful write",
|
||||
status: constantStatus(http.StatusNoContent),
|
||||
data: testData,
|
||||
registerExpectations: func(t *testing.T, store *replicationsMock.MockHttpConfigStore, conf *influxdb.ReplicationHTTPConfig) {
|
||||
store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil)
|
||||
store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(nil)
|
||||
|
@ -298,10 +271,9 @@ func TestWrite_Metrics(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "dropped data",
|
||||
status: http.StatusBadRequest,
|
||||
data: testData,
|
||||
wantWriteErr: nil,
|
||||
name: "dropped data",
|
||||
status: constantStatus(http.StatusBadRequest),
|
||||
data: testData,
|
||||
registerExpectations: func(t *testing.T, store *replicationsMock.MockHttpConfigStore, conf *influxdb.ReplicationHTTPConfig) {
|
||||
store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil)
|
||||
store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusBadRequest, invalidResponseCode(http.StatusBadRequest).Error()).Return(nil)
|
||||
|
@ -328,15 +300,13 @@ func TestWrite_Metrics(t *testing.T) {
|
|||
DropNonRetryableData: true,
|
||||
}
|
||||
|
||||
w, configStore := testWriter(t)
|
||||
w, configStore, _ := testWriter(t)
|
||||
w.waitFunc = instaWait()
|
||||
reg := prom.NewRegistry(zaptest.NewLogger(t))
|
||||
reg.MustRegister(w.metrics.PrometheusCollectors()...)
|
||||
|
||||
w.waitFunc = instaWait()
|
||||
w.maximumAttemptsBeforeErr = maximumAttemptsBeforeErr
|
||||
|
||||
tt.registerExpectations(t, configStore, testConfig)
|
||||
require.Equal(t, tt.wantWriteErr, w.Write(tt.data))
|
||||
require.NoError(t, w.Write(tt.data))
|
||||
tt.checkMetrics(t, reg)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -77,6 +77,7 @@ func WithMetricsDisabled(m bool) Option {
|
|||
|
||||
type MetaClient interface {
|
||||
CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error)
|
||||
DropDatabase(name string) error
|
||||
CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)
|
||||
Database(name string) (di *meta.DatabaseInfo)
|
||||
Databases() []meta.DatabaseInfo
|
||||
|
@ -317,7 +318,11 @@ func (e *Engine) UpdateBucketRetentionPolicy(ctx context.Context, bucketID platf
|
|||
func (e *Engine) DeleteBucket(ctx context.Context, orgID, bucketID platform.ID) error {
|
||||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
return e.tsdbStore.DeleteDatabase(bucketID.String())
|
||||
err := e.tsdbStore.DeleteDatabase(bucketID.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return e.metaClient.DropDatabase(bucketID.String())
|
||||
}
|
||||
|
||||
// DeleteBucketRangePredicate deletes data within a bucket from the storage engine. Any data
|
||||
|
|
|
@ -776,6 +776,8 @@ func (s *Store) DeleteShard(shardID uint64) error {
|
|||
}
|
||||
|
||||
// DeleteDatabase will close all shards associated with a database and remove the directory and files from disk.
|
||||
//
|
||||
// Returns nil if no database exists
|
||||
func (s *Store) DeleteDatabase(name string) error {
|
||||
s.mu.RLock()
|
||||
if _, ok := s.databases[name]; !ok {
|
||||
|
|
|
@ -128,24 +128,6 @@ func (c *Client) Close() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// AcquireLease attempts to acquire the specified lease.
|
||||
// TODO corylanou remove this for single node
|
||||
func (c *Client) AcquireLease(name string) (*Lease, error) {
|
||||
l := Lease{
|
||||
Name: name,
|
||||
Expiration: time.Now().Add(DefaultLeaseDuration),
|
||||
}
|
||||
return &l, nil
|
||||
}
|
||||
|
||||
// ClusterID returns the ID of the cluster it's connected to.
|
||||
func (c *Client) ClusterID() uint64 {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
return c.cacheData.ClusterID
|
||||
}
|
||||
|
||||
// Database returns info for the requested database.
|
||||
func (c *Client) Database(name string) *DatabaseInfo {
|
||||
c.mu.RLock()
|
||||
|
@ -272,6 +254,8 @@ func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionP
|
|||
}
|
||||
|
||||
// DropDatabase deletes a database.
|
||||
//
|
||||
// Returns nil if no database exists
|
||||
func (c *Client) DropDatabase(name string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
|
|
@ -2,7 +2,6 @@ package meta_test
|
|||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -1155,37 +1154,6 @@ func TestMetaClient_CreateShardGroupWithShards(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMetaClient_PersistClusterIDAfterRestart(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cfg := newConfig()
|
||||
defer os.RemoveAll(cfg.Dir)
|
||||
|
||||
store := newStore()
|
||||
|
||||
c := meta.NewClient(cfg, store)
|
||||
if err := c.Open(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := c.ClusterID()
|
||||
if id == 0 {
|
||||
t.Fatal("cluster ID can't be zero")
|
||||
}
|
||||
|
||||
c = meta.NewClient(cfg, store)
|
||||
if err := c.Open(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
idAfter := c.ClusterID()
|
||||
if idAfter == 0 {
|
||||
t.Fatal("cluster ID can't be zero")
|
||||
} else if idAfter != id {
|
||||
t.Fatalf("cluster id not the same: %d, %d", idAfter, id)
|
||||
}
|
||||
}
|
||||
|
||||
func newClient() (func(), *meta.Client) {
|
||||
cfg := newConfig()
|
||||
store := newStore()
|
||||
|
|
Loading…
Reference in New Issue