Remove dump from client and handler

pull/3591/head
gunnaraasen 2015-08-07 11:56:30 -07:00
parent bd512762db
commit 7dc7389e96
2 changed files with 0 additions and 156 deletions

View File

@@ -5,7 +5,6 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
@@ -311,34 +310,6 @@ func (c *Client) Ping() (time.Duration, string, error) {
	return time.Since(now), version, nil
}
// Dump connects to server and retrieves all data stored for specified database.
// If successful, Dump returns the entire response body, which is an io.ReadCloser
func (c *Client) Dump(db string) (io.ReadCloser, error) {
	u := c.url
	u.Path = "dump"
	values := u.Query()
	values.Set("db", db)
	u.RawQuery = values.Encode()
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", c.userAgent)
	if c.username != "" {
		req.SetBasicAuth(c.username, c.password)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return resp.Body, fmt.Errorf("HTTP Protocol error %d", resp.StatusCode)
	}
	return resp.Body, nil
}
// Structs
// Result represents a resultset returned from a single statement.
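For reference, a caller of the removed Dump method looked roughly like the sketch below. It assumes the 0.9-era client package (github.com/influxdb/influxdb/client) and its NewClient/Config constructors; the host and database name are placeholders, and the Dump call itself no longer compiles once this commit lands.

package main

import (
	"io"
	"log"
	"net/url"
	"os"

	"github.com/influxdb/influxdb/client"
)

func main() {
	// Placeholder host and database; NewClient/Config are the client package's
	// usual constructors, but Dump is removed by this commit.
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}
	c, err := client.NewClient(client.Config{URL: *u})
	if err != nil {
		log.Fatal(err)
	}
	body, err := c.Dump("mydb") // streamed the raw response body
	if err != nil {
		log.Fatal(err)
	}
	defer body.Close()
	if _, err := io.Copy(os.Stdout, body); err != nil {
		log.Fatal(err)
	}
}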

View File

@@ -114,10 +114,6 @@ func NewHandler(requireAuthentication, loggingEnabled, writeTrace bool) *Handler
			"process_continuous_queries",
			"POST", "/data/process_continuous_queries", false, false, h.serveProcessContinuousQueries,
		},
		// route{
		// "dump", // export all points in the given db.
		// "GET", "/dump", true, true, h.serveDump,
		// },
	})
	return h
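The removed route exposed this data behind GET /dump?db=<name>. The same points stay reachable through the standard query endpoint using the statements serveDump issued internally (SHOW MEASUREMENTS, then SELECT * FROM <measurement> GROUP BY *). A minimal sketch, assuming a server listening on localhost:8086 with the usual /query route; the database and measurement names are placeholders.

package main

import (
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Query one measurement the way serveDump did internally; "mydb" and "cpu"
	// are placeholders.
	v := url.Values{}
	v.Set("db", "mydb")
	v.Set("q", `SELECT * FROM "cpu" GROUP BY *`)
	resp, err := http.Get("http://localhost:8086/query?" + v.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}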
@@ -801,129 +797,6 @@ func (r *Response) Error() error {
	return nil
}
/*
FIXME: Convert to line protocol format.
// serveDump returns all points in the given database as a plaintext list of JSON structs.
// To get all points:
// Find all measurements (show measurements).
// For each measurement do select * from <measurement> group by *
func (h *Handler) serveDump(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
	q := r.URL.Query()
	db := q.Get("db")
	pretty := q.Get("pretty") == "true"
	delim := []byte("\n")
	measurements, err := h.showMeasurements(db, user)
	if err != nil {
		httpError(w, "error with dump: "+err.Error(), pretty, http.StatusInternalServerError)
		return
	}
	// Fetch all the points for each measurement.
	// From the 'select' query below, we get:
	//
	// columns:[col1, col2, col3, ...]
	// - and -
	// values:[[val1, val2, val3, ...], [val1, val2, val3, ...], [val1, val2, val3, ...]...]
	//
	// We need to turn that into multiple rows like so...
	// fields:{col1 : values[0][0], col2 : values[0][1], col3 : values[0][2]}
	// fields:{col1 : values[1][0], col2 : values[1][1], col3 : values[1][2]}
	// fields:{col1 : values[2][0], col2 : values[2][1], col3 : values[2][2]}
	//
	for _, measurement := range measurements {
		queryString := fmt.Sprintf("select * from %s group by *", measurement)
		p := influxql.NewParser(strings.NewReader(queryString))
		query, err := p.ParseQuery()
		if err != nil {
			httpError(w, "error with dump: "+err.Error(), pretty, http.StatusInternalServerError)
			return
		}
		res, err := h.QueryExecutor.ExecuteQuery(query, db, DefaultChunkSize)
		if err != nil {
			w.Write([]byte("*** SERVER-SIDE ERROR. MISSING DATA ***"))
			w.Write(delim)
			return
		}
		for result := range res {
			for _, row := range result.Series {
				points := make([]Point, 1)
				var point Point
				point.Name = row.Name
				point.Tags = row.Tags
				point.Fields = make(map[string]interface{})
				for _, tuple := range row.Values {
					for subscript, cell := range tuple {
						if row.Columns[subscript] == "time" {
							point.Time, _ = cell.(time.Time)
							continue
						}
						point.Fields[row.Columns[subscript]] = cell
					}
					points[0] = point
					batch := &Batch{
						Points: points,
						Database: db,
						RetentionPolicy: "default",
					}
					buf, err := json.Marshal(&batch)
					// TODO: Make this more legit in the future
					// Since we're streaming data as chunked responses, this error could
					// be in the middle of an already-started data stream. Until Go 1.5,
					// we can't really support proper trailer headers, so we'll just
					// wait until then: https://code.google.com/p/go/issues/detail?id=7759
					if err != nil {
						w.Write([]byte("*** SERVER-SIDE ERROR. MISSING DATA ***"))
						w.Write(delim)
						return
					}
					w.Write(buf)
					w.Write(delim)
				}
			}
		}
	}
}
// Return all the measurements from the given DB
func (h *Handler) showMeasurements(db string, user *meta.UserInfo) ([]string, error) {
	var measurements []string
	c, err := h.QueryExecutor.ExecuteQuery(&influxql.Query{Statements: []influxql.Statement{&influxql.ShowMeasurementsStatement{}}}, db, 0)
	if err != nil {
		return measurements, err
	}
	results := Response{}
	for r := range c {
		results.Results = append(results.Results, r)
	}
	for _, result := range results.Results {
		for _, row := range result.Series {
			for _, tuple := range (*row).Values {
				for _, cell := range tuple {
					measurements = append(measurements, interfaceToString(cell))
				}
			}
		}
	}
	return measurements, nil
}
func interfaceToString(v interface{}) string {
	switch t := v.(type) {
	case nil:
		return ""
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
		return fmt.Sprintf("%d", t)
	default:
		return fmt.Sprintf("%v", t)
	}
}
*/
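The FIXME at the top of the commented-out block asks for line protocol output instead of JSON-marshalled batches. Below is a rough, self-contained sketch of that serialization step for a single point, using a stand-in Point type with the same Name/Tags/Fields/Time shape as above; escaping, field type suffixes, and sorted tag order are deliberately omitted.

package main

import (
	"bytes"
	"fmt"
	"time"
)

// Point is a minimal stand-in for the struct the commented-out loop populates.
type Point struct {
	Name   string
	Tags   map[string]string
	Fields map[string]interface{}
	Time   time.Time
}

// pointToLineProtocol renders measurement,tag=v field=v timestamp.
func pointToLineProtocol(p Point) string {
	var b bytes.Buffer
	b.WriteString(p.Name)
	for k, v := range p.Tags {
		fmt.Fprintf(&b, ",%s=%s", k, v)
	}
	sep := " "
	for k, v := range p.Fields {
		fmt.Fprintf(&b, "%s%s=%v", sep, k, v)
		sep = ","
	}
	fmt.Fprintf(&b, " %d", p.Time.UnixNano())
	return b.String()
}

func main() {
	p := Point{
		Name:   "cpu",
		Tags:   map[string]string{"host": "server01"},
		Fields: map[string]interface{}{"value": 0.64},
		Time:   time.Unix(0, 1438967790000000000),
	}
	fmt.Println(pointToLineProtocol(p))
}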
// NormalizeBatchPoints returns a slice of Points, created by populating individual
// points within the batch, which do not have times or tags, with the top-level
// values.
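A simplified sketch of the normalization the comment above describes, under assumed Batch/Point shapes (the handler's real types carry more fields): points missing a timestamp or a tag inherit the batch-level values.

package main

import (
	"fmt"
	"time"
)

// Hypothetical, simplified shapes for illustration only.
type Point struct {
	Name   string
	Tags   map[string]string
	Fields map[string]interface{}
	Time   time.Time
}

type Batch struct {
	Tags   map[string]string
	Time   time.Time
	Points []Point
}

// normalize fills in missing point timestamps and tags from the batch.
func normalize(b Batch) []Point {
	out := make([]Point, 0, len(b.Points))
	for _, p := range b.Points {
		if p.Time.IsZero() {
			p.Time = b.Time
		}
		if p.Tags == nil {
			p.Tags = make(map[string]string)
		}
		for k, v := range b.Tags {
			if _, ok := p.Tags[k]; !ok {
				p.Tags[k] = v
			}
		}
		out = append(out, p)
	}
	return out
}

func main() {
	b := Batch{
		Tags:   map[string]string{"region": "us-west"},
		Time:   time.Now(),
		Points: []Point{{Name: "cpu", Fields: map[string]interface{}{"value": 1.0}}},
	}
	fmt.Printf("%+v\n", normalize(b))
}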