prefix partial write errors with `partial write:`
NOTE: parser errors (via the HTTP API) are also transformed into PartialWriteError
parent a736f186f0
commit 8097e817f6
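For reference, a minimal standalone sketch of the behavior this commit introduces (simplified stand-in types, not the actual tsdb package; the parse-error text in main is made up for illustration): PartialWriteError.Error() now carries the `partial write:` prefix itself, so the HTTP write handler can return werr.Error() directly and can wrap parser failures in a PartialWriteError to get the same prefix.

package main

import (
    "errors"
    "fmt"
)

// Simplified stand-in for tsdb.PartialWriteError; field names follow the diff below.
type PartialWriteError struct {
    Reason  string
    Dropped int
}

// After this commit, Error() itself adds the "partial write:" prefix.
func (e PartialWriteError) Error() string {
    return fmt.Sprintf("partial write: %s dropped=%d", e.Reason, e.Dropped)
}

func main() {
    // Shard-level partial write, e.g. the series-limit case exercised in the tests.
    werr := PartialWriteError{Reason: "max-series-per-database limit exceeded: (1000)", Dropped: 1}
    fmt.Println(werr.Error())
    // partial write: max-series-per-database limit exceeded: (1000) dropped=1

    // Parser failure from the HTTP API, wrapped the way the handler now does it
    // (illustrative error message only).
    parseError := errors.New("unable to parse line protocol")
    fmt.Println(PartialWriteError{Reason: parseError.Error()}.Error())
    // partial write: unable to parse line protocol dropped=0
}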
@@ -675,7 +675,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
 	} else if werr, ok := err.(tsdb.PartialWriteError); ok {
 		atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)-werr.Dropped))
 		atomic.AddInt64(&h.stats.PointsWrittenDropped, int64(werr.Dropped))
-		h.httpError(w, fmt.Sprintf("partial write: %v", werr), http.StatusBadRequest)
+		h.httpError(w, werr.Error(), http.StatusBadRequest)
 		return
 	} else if err != nil {
 		atomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))
@@ -686,7 +686,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
 		atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)))
 		// The other points failed to parse which means the client sent invalid line protocol. We return a 400
 		// response code as well as the lines that failed to parse.
-		h.httpError(w, fmt.Sprintf("partial write:\n%v", parseError), http.StatusBadRequest)
+		h.httpError(w, tsdb.PartialWriteError{Reason: parseError.Error()}.Error(), http.StatusBadRequest)
 		return
 	}
@@ -99,7 +99,7 @@ type PartialWriteError struct {
 }

 func (e PartialWriteError) Error() string {
-	return fmt.Sprintf("%s dropped=%d", e.Reason, e.Dropped)
+	return fmt.Sprintf("partial write: %s dropped=%d", e.Reason, e.Dropped)
 }

 // Shard represents a self-contained time series database. An inverted index of
@@ -539,7 +539,7 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
 		fieldsToCreate []*FieldCreate
 		err            error
 		dropped        int
-		reason         string
+		reason         string // only first error reason is set unless returned from CreateSeriesListIfNotExists
 	)

 	// Create all series against the index in bulk.
@@ -553,6 +553,9 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
 		tags := p.Tags()
 		if v := tags.Get(timeBytes); v != nil {
 			dropped++
+			if reason == "" {
+				reason = fmt.Sprintf("invalid field name: input tag \"%s\" on measurement \"%s\" is invalid", "time", p.Name())
+			}
 			continue
 		}
 		keys[j] = p.Key()
@@ -597,6 +600,9 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,

 		if !validField {
 			dropped++
+			if reason == "" {
+				reason = fmt.Sprintf("invalid field name: input tag \"%s\" on measurement \"%s\" is invalid", "time", p.Name())
+			}
 			continue
 		}
@@ -646,7 +652,9 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
 			if f.Type != fieldType {
 				atomic.AddInt64(&s.stats.WritePointsDropped, 1)
 				dropped++
-				reason = fmt.Sprintf("%s: input field \"%s\" on measurement \"%s\" is type %s, already exists as type %s", ErrFieldTypeConflict, iter.FieldKey(), name, fieldType, f.Type)
+				if reason == "" {
+					reason = fmt.Sprintf("%s: input field \"%s\" on measurement \"%s\" is type %s, already exists as type %s", ErrFieldTypeConflict, iter.FieldKey(), name, fieldType, f.Type)
+				}
 				skip = true
 			} else {
 				continue // Field is present, and it's of the same type. Nothing more to do.
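The validateSeriesAndFields hunks above all apply the same accumulation rule: every rejected point increments dropped, but reason is recorded only for the first failure (unless CreateSeriesListIfNotExists supplies one), so the resulting PartialWriteError reports the first problem seen together with the total drop count. Below is a small sketch of that pattern, reusing the PartialWriteError stand-in from the sketch above; the input type and the rejection check are invented for illustration.

// filterPoints mimics the accumulation pattern in validateSeriesAndFields:
// count every dropped input, keep only the first reason.
func filterPoints(points []string) ([]string, error) {
    var (
        kept    []string
        dropped int
        reason  string
    )
    for _, p := range points {
        if p == "" { // stand-in for any validation failure
            dropped++
            if reason == "" { // only the first failure sets the reason
                reason = "empty point"
            }
            continue
        }
        kept = append(kept, p)
    }
    if dropped > 0 {
        // Callers (and, via the HTTP handler, clients) would see:
        // "partial write: empty point dropped=N"
        return kept, PartialWriteError{Reason: reason, Dropped: dropped}
    }
    return kept, nil
}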
@@ -135,7 +135,7 @@ func TestMaxSeriesLimit(t *testing.T) {
 	err = sh.WritePoints([]models.Point{pt})
 	if err == nil {
 		t.Fatal("expected error")
-	} else if exp, got := `max-series-per-database limit exceeded: (1000) dropped=1`, err.Error(); exp != got {
+	} else if exp, got := `partial write: max-series-per-database limit exceeded: (1000) dropped=1`, err.Error(); exp != got {
 		t.Fatalf("unexpected error message:\n\texp = %s\n\tgot = %s", exp, got)
 	}
@@ -188,7 +188,7 @@ func TestShard_MaxTagValuesLimit(t *testing.T) {
 	err = sh.WritePoints([]models.Point{pt})
 	if err == nil {
 		t.Fatal("expected error")
-	} else if exp, got := `max-values-per-tag limit exceeded (1000/1000): measurement="cpu" tag="host" value="server9999" dropped=1`, err.Error(); exp != got {
+	} else if exp, got := `partial write: max-values-per-tag limit exceeded (1000/1000): measurement="cpu" tag="host" value="server9999" dropped=1`, err.Error(); exp != got {
 		t.Fatalf("unexpected error message:\n\texp = %s\n\tgot = %s", exp, got)
 	}