fix(influxql): set correct Content-Type on v1 query responses (#20565)

pull/20576/head
Daniel Moran 2021-01-22 07:31:52 -08:00 committed by GitHub
parent 52da4020f8
commit 10fcc2bf1d
15 changed files with 96 additions and 205 deletions

View File

@@ -54,6 +54,8 @@ Replacement `tsi1` indexes will be automatically generated on startup for shards
1. [20527](https://github.com/influxdata/influxdb/pull/20527): Allow backups to complete while a snapshot is in progress.
1. [20539](https://github.com/influxdata/influxdb/pull/20539): Prevent extra output row from GROUP BY crossing DST boundary.
1. [20548](https://github.com/influxdata/influxdb/pull/20548): Prevent panic in `influxd upgrade` when V1 users exist and no V1 config is given.
1. [20565](https://github.com/influxdata/influxdb/pull/20565): Set correct Content-Type on v1 query responses.
1. [20565](https://github.com/influxdata/influxdb/pull/20565): Update V1 API spec to document all valid Accept headers and matching Content-Types.
## v2.0.3 [2020-12-14]

View File

@@ -142,11 +142,15 @@ func (h *InfluxqlHandler) handleInfluxqldQuery(w http.ResponseWriter, r *http.Re
}
}
formatString := r.Header.Get("Accept")
encodingFormat := influxql.EncodingFormatFromMimeType(formatString)
w.Header().Set("Content-Type", encodingFormat.ContentType())
req := &influxql.QueryRequest{
DB: r.FormValue("db"),
RP: r.FormValue("rp"),
Epoch: r.FormValue("epoch"),
EncodingFormat: influxql.EncodingFormatFromMimeType(r.Header.Get("Accept")),
EncodingFormat: encodingFormat,
OrganizationID: o.ID,
Query: query,
Params: params,
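
The hunk above computes the encoding once from the `Accept` header, sets `Content-Type` before any response bytes are written, and reuses the same value in the query request, so the header and the encoder can no longer drift apart. A minimal standalone sketch of that pattern (a hypothetical handler, not the InfluxDB code; the real mapping is `influxql.EncodingFormatFromMimeType` plus `EncodingFormat.ContentType`, shown further down this page):

```go
package main

import (
	"fmt"
	"net/http"
)

// contentTypeFor mirrors the Accept -> Content-Type negotiation this commit
// introduces: exact matches are echoed back, everything else falls back to JSON.
func contentTypeFor(accept string) string {
	switch accept {
	case "application/csv", "text/csv", "application/x-msgpack":
		return accept
	default: // "application/json", "*/*", unknown types, or no header at all
		return "application/json"
	}
}

func handleQuery(w http.ResponseWriter, r *http.Request) {
	ct := contentTypeFor(r.Header.Get("Accept"))
	// Headers must be set before the first Write; changes after that are ignored.
	w.Header().Set("Content-Type", ct)
	fmt.Fprintln(w, "...results encoded as", ct)
}
```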

View File

@@ -15,6 +15,7 @@ import (
imock "github.com/influxdata/influxdb/v2/influxql/mock"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/mock"
"go.uber.org/zap/zaptest"
)
var cmpOpts = []cmp.Option{
@@ -41,8 +42,6 @@ var cmpOpts = []cmp.Option{
}
func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
t.Skip("almost good to go, only unexpected content types")
ctx := context.Background()
type fields struct {
@@ -61,7 +60,6 @@ func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
wantCode int
wantHeader http.Header
wantBody []byte
wantLogs []string
}{
{
name: "no token causes http error",
@@ -189,15 +187,14 @@ func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
},
},
args: args{
r: WithHeader(httptest.NewRequest("POST", "/query", nil).WithContext(ctx), "Accept", "text/csv"),
r: WithHeader(httptest.NewRequest("POST", "/query", nil).WithContext(ctx), "Accept", "application/foo"),
w: httptest.NewRecorder(),
},
wantBody: []byte("good"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"text/csv"},
"Content-Type": {"application/json"},
},
wantLogs: []string{"text/csv"},
},
{
name: "good query",
@@ -235,6 +232,7 @@ func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
}
h := NewInfluxQLHandler(b, HandlerConfig{})
h.Logger = zaptest.NewLogger(t)
if tt.context != nil {
tt.args.r = tt.args.r.WithContext(tt.context)
@@ -255,7 +253,6 @@ func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
if got, want := tt.args.w.Body.Bytes(), tt.wantBody; !cmp.Equal(got, want) {
t.Errorf("HandleQuery() body = got(-)/want(+) %s", cmp.Diff(string(got), string(want)))
}
})
}
}
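
The table-driven test above pins down the fallback behavior, including the `application/foo` case. The same assertion can be reproduced against the sketch handler from earlier on this page (a hypothetical test, assuming `handleQuery` is in scope):

```go
package main

import (
	"net/http/httptest"
	"testing"
)

// TestUnknownAcceptFallsBackToJSON mirrors the "application/foo" case above:
// an unrecognized Accept value must produce a JSON Content-Type, not an error.
func TestUnknownAcceptFallsBackToJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("POST", "/query", nil)
	req.Header.Set("Accept", "application/foo")

	handleQuery(rec, req)

	if got := rec.Header().Get("Content-Type"); got != "application/json" {
		t.Fatalf("Content-Type = %q, want %q", got, "application/json")
	}
}
```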

View File

@@ -114,6 +114,17 @@ paths:
- $ref: "#/components/parameters/TraceSpan"
- $ref: "#/components/parameters/AuthUserV1"
- $ref: "#/components/parameters/AuthPassV1"
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
@@ -154,17 +165,19 @@ paths:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: "#/components/schemas/InfluxQLCSVResponse"
text/csv:
schema:
type: string
example: >
name,tags,time,test_field,test_tag
test_measurement,,1603740794286107366,1,tag_value
test_measurement,,1603740870053205649,2,tag_value
test_measurement,,1603741221085428881,3,tag_value
text/plain:
$ref: "#/components/schemas/InfluxQLCSVResponse"
application/json:
schema:
$ref: "#/components/schemas/InfluxQLResponse"
application/x-msgpack:
schema:
type: string
format: binary
"429":
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
@@ -233,6 +246,14 @@ components:
items:
type: array
items: {}
InfluxQLCSVResponse:
type: string
example: >
name,tags,time,test_field,test_tag
test_measurement,,1603740794286107366,1,tag_value
test_measurement,,1603740870053205649,2,tag_value
test_measurement,,1603741221085428881,3,tag_value
Error:
properties:
code:
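
To exercise the negotiation documented above end to end, a client only needs to set `Accept` and compare it against the `Content-Type` that comes back. A hedged sketch, assuming a local InfluxDB 2.x at `localhost:8086`, a DBRP mapping for `mydb`, and a valid API token (all hypothetical):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{"db": {"mydb"}, "q": {"SELECT * FROM m0"}}
	req, err := http.NewRequest("GET", "http://localhost:8086/query?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/csv")       // any value from the enum above
	req.Header.Set("Authorization", "Token my-token") // hypothetical credentials

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// After this fix the server echoes the negotiated encoding back.
	fmt.Println(resp.Header.Get("Content-Type")) // expected: application/csv

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```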

View File

@@ -74,6 +74,7 @@ func (q *Query) Execute(ctx context.Context, t *testing.T, db string, c *tests.C
QueryParams(params...).
Header("Accept", "application/json").
RespFn(func(resp *http.Response) error {
require.Equal(t, "application/json", resp.Header.Get("Content-Type"))
b, err := ioutil.ReadAll(resp.Body)
q.got = strings.TrimSpace(string(b))
return err

View File

@@ -5,10 +5,8 @@ description: |
tests:
- query: "select sum(f0) from m0"
result: |
name: m0
time sum
---- ---
0 10
name,tags,time,sum
m0,,0,10
generated:
start: "1000000000000"

View File

@@ -8,10 +8,8 @@ tests:
- name: "no_aggregate"
query: "select f1 from m0 where f0 > 1 AND time >= 30000000000 AND time <= 50000000000"
result: |
name: m0
time f1
---- --
50000000000 30
name,tags,time,f1
m0,,50000000000,30
- name: "aggregate"
description: |
@@ -19,10 +17,8 @@ tests:
query: "SELECT sum(f1) FROM m0 WHERE f0 >= 1"
result: |
name: m0
time sum
---- ---
0 50
name,tags,time,sum
m0,,0,50
dataset: |
m0,t0=tv0 f0=0i,f1=10i 30000000000

View File

@@ -1,10 +1,8 @@
tests:
- query: "select sum(used), mean(active) from mem where time >= 30000000000 AND time < 50000000000"
result: |
name: mem
time sum mean
---- --- ----
30000000000 6549852160 7164311552
name,tags,time,sum,mean
mem,,30000000000,6549852160,7164311552
dataset: |
mem,host=gianarb page_tables=39534592i,vmalloc_chunk=0i,write_back_tmp=0i,dirty=884736i,high_total=0i,available=11992494080i,used=3284553728i,active=7172775936i,huge_pages_free=0i,swap_total=8589930496i,vmalloc_used=38604800i,free=4928421888i,commit_limit=16853958656i,committed_as=12584218624i,mapped=939278336i,vmalloc_total=35184372087808i,write_back=0i,buffered=989163520i,wired=0i,low_free=0i,huge_page_size=2097152i,swap_cached=120016896i,swap_free=8445227008i,inactive=3461185536i,slab=542363648i,high_free=0i,shared=903233536i,sreclaimable=449650688i,total=16528056320i,cached=7325917184i,available_percent=72.55840522208482,sunreclaim=92712960i,used_percent=19.87259520664557,huge_pages_total=0i,low_total=0i 0

View File

@@ -1,11 +1,9 @@
tests:
- query: "select host, inactive from mem where time >=30000000000 AND time < 50000000000"
result: |
name: mem
time host inactive
---- ---- --------
30000000000 gianarb 3460194304
40000000000 gianarb 3454791680
name,tags,time,host,inactive
mem,,30000000000,gianarb,3460194304
mem,,40000000000,gianarb,3454791680
dataset: |
mem,host=gianarb page_tables=39534592i,vmalloc_chunk=0i,write_back_tmp=0i,dirty=884736i,high_total=0i,available=11992494080i,used=3284553728i,active=7172775936i,huge_pages_free=0i,swap_total=8589930496i,vmalloc_used=38604800i,free=4928421888i,commit_limit=16853958656i,committed_as=12584218624i,mapped=939278336i,vmalloc_total=35184372087808i,write_back=0i,buffered=989163520i,wired=0i,low_free=0i,huge_page_size=2097152i,swap_cached=120016896i,swap_free=8445227008i,inactive=3461185536i,slab=542363648i,high_free=0i,shared=903233536i,sreclaimable=449650688i,total=16528056320i,cached=7325917184i,available_percent=72.55840522208482,sunreclaim=92712960i,used_percent=19.87259520664557,huge_pages_total=0i,low_total=0i 0

View File

@@ -3,11 +3,9 @@ tests:
billing:
point_count: 1
result: |
name: m0
time f0 f1 t0
---- -- -- --
30000000000 0 10 tv0
40000000000 1 20 tv1
name,tags,time,f0,f1,t0
m0,,30000000000,0,10,tv0
m0,,40000000000,1,20,tv1
dataset: |
m0,t0=tv0 f0=0i,f1=10i 30000000000

View File

@@ -64,10 +64,10 @@
"type": "string"
},
"result": {
"description": "The expected results in table format",
"description": "The expected results in CSV format",
"type": "string"
}
}
}
}
}
}

View File

@@ -176,7 +176,7 @@ func validate(t *testing.T, gf *TestSuite) {
QueryParams([2]string{"q", test.Query}).
QueryParams([2]string{"epoch", "ns"}).
Header("Content-Type", "application/vnd.influxql").
Header("Accept", "text/plain").
Header("Accept", "application/csv").
RespFn(func(resp *http.Response) error {
b, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)

View File

@@ -6,13 +6,8 @@ import (
"context"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/influxdata/influxdb/v2/influxql"
@@ -31,10 +26,8 @@ type ResponseWriter interface {
// in the request that wraps the ResponseWriter.
func NewResponseWriter(encoding influxql.EncodingFormat) ResponseWriter {
switch encoding {
case influxql.EncodingFormatCSV:
case influxql.EncodingFormatAppCSV, influxql.EncodingFormatTextCSV:
return &csvFormatter{statementID: -1}
case influxql.EncodingFormatTable:
return &textFormatter{}
case influxql.EncodingFormatMessagePack:
return &msgpFormatter{}
case influxql.EncodingFormatJSON:
@@ -297,143 +290,3 @@ func stringsEqual(a, b []string) bool {
}
return true
}
func tagsEqual(prev, current map[string]string) bool {
return reflect.DeepEqual(prev, current)
}
func columnsEqual(prev, current []string) bool {
return reflect.DeepEqual(prev, current)
}
func headersEqual(prev, current models.Row) bool {
if prev.Name != current.Name {
return false
}
return tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns)
}
type textFormatter struct{}
func (f *textFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) {
span, _ := tracing.StartSpanFromContext(ctx)
defer span.Finish()
if err := resp.Error(); err != nil {
fmt.Fprintln(w, err.Error())
return nil
}
// Create a tabbed writer for each result as they won't always line up
writer := new(tabwriter.Writer)
writer.Init(w, 0, 8, 1, ' ', 0)
var previousHeaders models.Row
for i, result := range resp.Results {
// Print out all messages first
for _, m := range result.Messages {
fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text)
}
// Check to see if the headers are the same as the previous row. If so, suppress them in the output
suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, *result.Series[0])
if !suppressHeaders && len(result.Series) > 0 {
previousHeaders = models.Row{
Name: result.Series[0].Name,
Tags: result.Series[0].Tags,
Columns: result.Series[0].Columns,
}
}
// If we are suppressing headers, don't output the extra line return. If we
// aren't suppressing headers, then we put out line returns between results
// (not before the first result, and not after the last result).
if !suppressHeaders && i > 0 {
fmt.Fprintln(writer, "")
}
rows := f.formatResults(result.Series, "\t", suppressHeaders)
for _, r := range rows {
fmt.Fprintln(writer, r)
}
}
_ = writer.Flush()
return nil
}
func (f *textFormatter) formatResults(result models.Rows, separator string, suppressHeaders bool) []string {
var rows []string
// Create a tabbed writer for each result as they won't always line up
for i, row := range result {
// gather tags
var tags []string
for k, v := range row.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
sort.Strings(tags)
}
var columnNames []string
columnNames = append(columnNames, row.Columns...)
// Output a line separator if we have more than one set of results and format is column
if i > 0 && !suppressHeaders {
rows = append(rows, "")
}
// If we are column format, we break out the name/tag to separate lines
if !suppressHeaders {
if row.Name != "" {
n := fmt.Sprintf("name: %s", row.Name)
rows = append(rows, n)
}
if len(tags) > 0 {
t := fmt.Sprintf("tags: %s", strings.Join(tags, ", "))
rows = append(rows, t)
}
}
if !suppressHeaders {
rows = append(rows, strings.Join(columnNames, separator))
}
// if format is column, write dashes under each column
if !suppressHeaders {
var lines []string
for _, columnName := range columnNames {
lines = append(lines, strings.Repeat("-", len(columnName)))
}
rows = append(rows, strings.Join(lines, separator))
}
for _, v := range row.Values {
var values []string
for _, vv := range v {
values = append(values, interfaceToString(vv))
}
rows = append(rows, strings.Join(values, separator))
}
}
return rows
}
func interfaceToString(v interface{}) string {
switch t := v.(type) {
case nil:
return ""
case bool:
return fmt.Sprintf("%v", v)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
return fmt.Sprintf("%d", t)
case float32:
// Default for floats via `fmt.Sprintf("%v", t)` is to represent them in scientific notation.
// We want to represent them as they are, with the least digits as possible (prec: -1).
return strconv.FormatFloat(float64(t), 'f', -1, 32)
case float64:
// Default for floats via `fmt.Sprintf("%v", t)` is to represent them in scientific notation.
// We want to represent them as they are, with the least digits as possible (prec: -1).
return strconv.FormatFloat(t, 'f', -1, 64)
default:
return fmt.Sprintf("%v", t)
}
}

View File

@@ -10,6 +10,7 @@ type EncodingFormat int
func (f *EncodingFormat) UnmarshalJSON(bytes []byte) error {
var s string
if err := json.Unmarshal(bytes, &s); err != nil {
return err
}
@@ -24,23 +25,21 @@ func (f EncodingFormat) MarshalJSON() ([]byte, error) {
const (
EncodingFormatJSON EncodingFormat = iota
EncodingFormatCSV
EncodingFormatTextCSV
EncodingFormatAppCSV
EncodingFormatMessagePack
EncodingFormatTable
)
// Returns the closest encoding format for the specified mime type.
// The default is JSON if no exact match is found.
func EncodingFormatFromMimeType(s string) EncodingFormat {
switch s {
case "application/csv", "text/csv":
return EncodingFormatCSV
case "text/plain":
return EncodingFormatTable
case "application/csv":
return EncodingFormatAppCSV
case "text/csv":
return EncodingFormatTextCSV
case "application/x-msgpack":
return EncodingFormatMessagePack
case "application/json":
fallthrough
default:
return EncodingFormatJSON
}
@@ -48,14 +47,12 @@ func EncodingFormatFromMimeType(s string) EncodingFormat {
func (f EncodingFormat) ContentType() string {
switch f {
case EncodingFormatCSV:
case EncodingFormatAppCSV:
return "application/csv"
case EncodingFormatTextCSV:
return "text/csv"
case EncodingFormatTable:
return "text/plain"
case EncodingFormatMessagePack:
return "application/x-msgpack"
case EncodingFormatJSON:
fallthrough
default:
return "application/json"
}
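
Taken together, the two switches above give a simple round-trip property: every exactly-matched `Accept` value maps to an `EncodingFormat` whose `ContentType()` is that same MIME type, and everything else collapses to JSON. A quick sketch of that invariant, using the package's real identifiers:

```go
package main

import "github.com/influxdata/influxdb/v2/influxql"

func main() {
	// Each supported MIME type should survive the Accept -> format -> Content-Type trip.
	for _, mime := range []string{
		"application/json", "application/csv", "text/csv", "application/x-msgpack",
	} {
		f := influxql.EncodingFormatFromMimeType(mime)
		if got := f.ContentType(); got != mime {
			panic("Accept and Content-Type drifted: " + mime + " -> " + got)
		}
	}
}
```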

View File

@@ -0,0 +1,28 @@
package influxql
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestEncodingFormatFromMimeType(t *testing.T) {
tests := []struct {
s string
exp EncodingFormat
}{
{s: "application/csv", exp: EncodingFormatAppCSV},
{s: "text/csv", exp: EncodingFormatTextCSV},
{s: "application/x-msgpack", exp: EncodingFormatMessagePack},
{s: "application/json", exp: EncodingFormatJSON},
{s: "*/*", exp: EncodingFormatJSON},
{s: "", exp: EncodingFormatJSON},
{s: "application/other", exp: EncodingFormatJSON},
}
for _, tt := range tests {
t.Run(tt.s, func(t *testing.T) {
got := EncodingFormatFromMimeType(tt.s)
assert.Equal(t, tt.exp, got)
})
}
}
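
These cases can be run in isolation with something like `go test ./influxql/ -run TestEncodingFormatFromMimeType` from the repository root (the exact package path is an assumption based on the imports shown above).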